core_validation.cpp revision c70c914e514d6ee222af18505e6d4ce8387c3fa2
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
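
// Illustrative usage sketch for LOGCONSOLE (hypothetical values, not part of the layer):
// it takes printf-style arguments; the non-Android variant appends a newline.
//
//     LOGCONSOLE("Enabling validation layer %s (version %u)", "VK_LAYER_LUNARG_core_validation", 1u);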

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

namespace core_validation {

using std::unordered_map;
using std::unordered_set;
using std::unique_ptr;
using std::vector;
using std::string;
using std::stringstream;
using std::max;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;
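
// Illustrative sketch of how the two sentinel memory handles above are meant to be
// tested ("image_state" is a hypothetical variable; the real checks appear in the
// validation routines later in this file):
//
//     if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
//         // WSI swapchain image: no application-visible VkDeviceMemory backs it.
//     } else if (image_state->binding.mem == MEMORY_UNBOUND) {
//         // The memory this object was bound to has since been freed.
//     }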

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        // Note: index with i (not 0) so every enabled layer name is actually examined.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
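
// Illustrative sketch of the ordering the check above enforces (hypothetical
// application code): core_validation must precede unique_objects in the
// enabled-layer list.
//
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//     VkInstanceCreateInfo ci = {};
//     ci.enabledLayerCount = 2;
//     ci.ppEnabledLayerNames = layers;  // Reversed order would trigger the console warning.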

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) {  // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() {  // ++x;
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
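
// Illustrative sketch of walking a module with spirv_inst_iter ("module" is a
// hypothetical shader_module): each increment advances by the instruction's word
// count, so the loop visits whole instructions rather than raw words.
//
//     for (auto insn : module) {
//         if (insn.opcode() == spv::OpEntryPoint) {
//             auto name = (char const *)&insn.word(3);  // Entry point name literal begins at word 3.
//         }
//     }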

struct shader_module {
    // The spirv image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. This is useful because walking type
    // trees, constant expressions, etc. requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;
    bool has_valid_spirv;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index(),
          has_valid_spirv(true) {
        build_def_index(this);
    }

    shader_module() : has_valid_spirv(false) {}

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); }  // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }          // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
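
// Illustrative sketch of resolving an <id> through def_index (hypothetical
// "module" and "type_id" values): get_def() returns end() for unknown ids, so
// callers must check before dereferencing.
//
//     auto def = module.get_def(type_id);
//     if (def != module.end() && def.opcode() == spv::OpTypePointer) {
//         auto pointee = module.get_def(def.word(3));  // Word 3 of OpTypePointer is the pointee type <id>.
//     }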

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR GetSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_string[type], bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     kVulkanObjectTypeImage, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 kVulkanObjectTypeBuffer, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = GetImageState(dev_data, VkImage(handle));
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
            mem_binding->binding.mem = mem;
        }
    }
}

// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_00804;
            const char *handle_type = "IMAGE";
            if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                error_code = VALIDATION_ERROR_00792;
                handle_type = "BUFFER";
            } else {
                assert(strcmp(apiName, "vkBindImageMemory()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, error_code, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, handle_type, validation_error_map[error_code]);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_00803;
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = VALIDATION_ERROR_00791;
                } else {
                    assert(strcmp(apiName, "vkBindImageMemory()") == 0);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                reinterpret_cast<uint64_t &>(mem), __LINE__, error_code, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
                                apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem),
                                validation_error_map[error_code]);
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which was previously bound to memory that has since been freed. Memory bindings are immutable "
                                "in Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, reinterpret_cast<uint64_t &>(mem), handle);
            }
        }
    }
    return skip;
}
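
// Illustrative sketch of the immutability rule ValidateSetMemBinding() enforces
// (hypothetical application calls, not part of the layer):
//
//     vkBindImageMemory(device, image, mem_a, 0);  // OK: first binding.
//     vkBindImageMemory(device, image, mem_b, 0);  // Error: non-sparse memory bindings are immutable.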

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns a skip value (currently always false)
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip;
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
            // Types
            case spv::OpTypeVoid:
            case spv::OpTypeBool:
            case spv::OpTypeInt:
            case spv::OpTypeFloat:
            case spv::OpTypeVector:
            case spv::OpTypeMatrix:
            case spv::OpTypeImage:
            case spv::OpTypeSampler:
            case spv::OpTypeSampledImage:
            case spv::OpTypeArray:
            case spv::OpTypeRuntimeArray:
            case spv::OpTypeStruct:
            case spv::OpTypeOpaque:
            case spv::OpTypePointer:
            case spv::OpTypeFunction:
            case spv::OpTypeEvent:
            case spv::OpTypeDeviceEvent:
            case spv::OpTypeReserveId:
            case spv::OpTypeQueue:
            case spv::OpTypePipe:
                module->def_index[insn.word(1)] = insn.offset();
                break;

            // Fixed constants
            case spv::OpConstantTrue:
            case spv::OpConstantFalse:
            case spv::OpConstant:
            case spv::OpConstantComposite:
            case spv::OpConstantSampler:
            case spv::OpConstantNull:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Specialization constants
            case spv::OpSpecConstantTrue:
            case spv::OpSpecConstantFalse:
            case spv::OpSpecConstant:
            case spv::OpSpecConstantComposite:
            case spv::OpSpecConstantOp:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Variables
            case spv::OpVariable:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Functions
            case spv::OpFunction:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            default:
                // We don't care about any other defs for now.
                break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
        case spv::StorageClassInput:
            return "input";
        case spv::StorageClassOutput:
            return "output";
        case spv::StorageClassUniformConstant:
            return "const uniform";
        case spv::StorageClassUniform:
            return "uniform";
        case spv::StorageClassWorkgroup:
            return "workgroup local";
        case spv::StorageClassCrossWorkgroup:
            return "workgroup global";
        case spv::StorageClassPrivate:
            return "private global";
        case spv::StorageClassFunction:
            return "function";
        case spv::StorageClassGeneric:
            return "generic";
        case spv::StorageClassAtomicCounter:
            return "atomic counter";
        case spv::StorageClassImage:
            return "image";
        case spv::StorageClassPushConstant:
            return "push constant";
        default:
            return "unknown";
    }
}

// Get the value of an integral constant
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        // TODO: Either ensure that the specialization transform is already performed on a module we're
        //       considering here, OR -- specialize on the fly now.
        return 1;
    }

    return value.word(3);
}

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypeBool:
            ss << "bool";
            break;
        case spv::OpTypeInt:
            ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
            break;
        case spv::OpTypeFloat:
            ss << "float" << insn.word(2);
            break;
        case spv::OpTypeVector:
            ss << "vec" << insn.word(3) << " of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeMatrix:
            ss << "mat" << insn.word(3) << " of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeArray:
            ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypePointer:
            ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
            describe_type_inner(ss, src, insn.word(3));
            break;
        case spv::OpTypeStruct: {
            ss << "struct of (";
            for (unsigned i = 2; i < insn.len(); i++) {
                describe_type_inner(ss, src, insn.word(i));
                if (i == insn.len() - 1) {
                    ss << ")";
                } else {
                    ss << ", ";
                }
            }
            break;
        }
        case spv::OpTypeSampler:
            ss << "sampler";
            break;
        case spv::OpTypeSampledImage:
            ss << "sampler+";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeImage:
            ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
            break;
        default:
            ss << "oddtype";
            break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
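
// Illustrative output of describe_type() (a sketch; exact text depends on the
// module): a uniform block holding a mat4 and a vec4 of 32-bit floats renders
// roughly as
//
//     "ptr to uniform struct of (mat4 of vec4 of float32, vec4 of float32)"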

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    // Walk two type trees together, and complain about differences
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. Storage class is expected to differ.
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
        return false;
    }

    switch (a_insn.opcode()) {
        case spv::OpTypeBool:
            return true;
        case spv::OpTypeInt:
            // Match on width, signedness
            return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
        case spv::OpTypeFloat:
            // Match on width
            return a_insn.word(2) == b_insn.word(2);
        case spv::OpTypeVector:
            // Match on element type, count.
            if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false)) return false;
            if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
                return a_insn.word(3) >= b_insn.word(3);
            } else {
                return a_insn.word(3) == b_insn.word(3);
            }
        case spv::OpTypeMatrix:
            // Match on element type, count.
            return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
                   a_insn.word(3) == b_insn.word(3);
        case spv::OpTypeArray:
            // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
            // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
            return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
                   get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
        case spv::OpTypeStruct:
            // Match on all element types
            {
                if (a_insn.len() != b_insn.len()) {
                    return false;  // Structs cannot match if member counts differ
                }

                for (unsigned i = 2; i < a_insn.len(); i++) {
                    if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                        return false;
                    }
                }

                return true;
            }
        default:
            // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
            return false;
    }
}
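
// Worked example for the relaxed-matching rule above: with relaxed == true, a
// consumer input of vec3 (3 x float32, a narrow numeric type) matches a producer
// output of vec4, since the producer may write more components than the consumer
// reads. The opposite direction (vec4 input against vec3 output) still fails.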

static unsigned value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, unsigned def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypePointer:
            // See through the ptr -- this is only ever at the toplevel for graphics shaders; we're never actually passing
            // pointers around.
            return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
        case spv::OpTypeArray:
            if (strip_array_level) {
                return get_locations_consumed_by_type(src, insn.word(2), false);
            } else {
                return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
            }
        case spv::OpTypeMatrix:
            // Num locations is the dimension * element size
            return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
        case spv::OpTypeVector: {
            auto scalar_type = src->get_def(insn.word(2));
            auto bit_width =
                (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ? scalar_type.word(2) : 32;

            // Locations are 128-bit wide; 3- and 4-component vectors of 64 bit types require two.
            return (bit_width * insn.word(3) + 127) / 128;
        }
        default:
            // Everything else is just 1.
            return 1;

            // TODO: extend to handle 64bit scalar types, whose vectors may need multiple locations.
    }
}

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
        case VK_FORMAT_R64G64B64A64_SFLOAT:
        case VK_FORMAT_R64G64B64A64_SINT:
        case VK_FORMAT_R64G64B64A64_UINT:
        case VK_FORMAT_R64G64B64_SFLOAT:
        case VK_FORMAT_R64G64B64_SINT:
        case VK_FORMAT_R64G64B64_UINT:
            return 2;
        default:
            return 1;
    }
}
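
// Worked examples for the two location-counting helpers above (locations are
// 128 bits wide, so 64-bit 3-/4-component vectors spill into a second one):
//
//     dvec3 (3 x float64): (64 * 3 + 127) / 128          -> 2 locations
//     mat4 of vec4 float32: 4 columns x 1 location each  -> 4 locations
//     get_locations_consumed_by_format(VK_FORMAT_R64G64B64A64_SFLOAT) -> 2
//     get_locations_consumed_by_format(VK_FORMAT_R32G32B32A32_SFLOAT) -> 1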

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    bool is_relaxed_precision;
    // TODO: collect the name, too? Isn't required to be present.
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},  {"tessellation control shader", true, true}, {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false}, {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(shader_module const *src, std::map<location_t, interface_var> *out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    // Walk down the type_id presented, trying to determine whether it's actually an interface block.
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        // This isn't an interface block.
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;
    std::unordered_map<unsigned, unsigned> member_relaxed_precision;

    // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }

            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
                member_relaxed_precision[member_index] = 1;
            }
        }
    }

    // Second pass -- produce the output, from Location decorations
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    // TODO: member index in interface_var too?
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    v.is_relaxed_precision = is_relaxed_precision;
                    (*out)[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
1189
1190static std::map<location_t, interface_var> collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1191                                                                         spv::StorageClass sinterface, bool is_array_of_verts) {
1192    std::unordered_map<unsigned, unsigned> var_locations;
1193    std::unordered_map<unsigned, unsigned> var_builtins;
1194    std::unordered_map<unsigned, unsigned> var_components;
1195    std::unordered_map<unsigned, unsigned> blocks;
1196    std::unordered_map<unsigned, unsigned> var_patch;
1197    std::unordered_map<unsigned, unsigned> var_relaxed_precision;
1198
1199    for (auto insn : *src) {
1200        // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
1201        // fits neither model.
1202        if (insn.opcode() == spv::OpDecorate) {
1203            if (insn.word(2) == spv::DecorationLocation) {
1204                var_locations[insn.word(1)] = insn.word(3);
1205            }
1206
1207            if (insn.word(2) == spv::DecorationBuiltIn) {
1208                var_builtins[insn.word(1)] = insn.word(3);
1209            }
1210
1211            if (insn.word(2) == spv::DecorationComponent) {
1212                var_components[insn.word(1)] = insn.word(3);
1213            }
1214
1215            if (insn.word(2) == spv::DecorationBlock) {
1216                blocks[insn.word(1)] = 1;
1217            }
1218
1219            if (insn.word(2) == spv::DecorationPatch) {
1220                var_patch[insn.word(1)] = 1;
1221            }
1222
1223            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
1224                var_relaxed_precision[insn.word(1)] = 1;
1225            }
1226        }
1227    }
1228
1229    // TODO: handle grouped decorations
1230    // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
1231
1232    // Find the end of the entrypoint's name string. additional zero bytes follow the actual null terminator, to fill out the
1233    // rest of the word - so we only need to look at the last byte in the word to determine which word contains the terminator.
1234    uint32_t word = 3;
1235    while (entrypoint.word(word) & 0xff000000u) {
1236        ++word;
1237    }
1238    ++word;
1239
1240    std::map<location_t, interface_var> out;
1241
1242    for (; word < entrypoint.len(); word++) {
1243        auto insn = src->get_def(entrypoint.word(word));
1244        assert(insn != src->end());
1245        assert(insn.opcode() == spv::OpVariable);
1246
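        // OpVariable operands: word(1) = result type <id> (a pointer type), word(2) = result <id>, word(3) = storage class.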
1247        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1248            unsigned id = insn.word(2);
1249            unsigned type = insn.word(1);
1250
1251            int location = value_or_default(var_locations, id, -1);
1252            int builtin = value_or_default(var_builtins, id, -1);
1253            unsigned component = value_or_default(var_components, id, 0);  // Unspecified is OK; defaults to 0
1254            bool is_patch = var_patch.find(id) != var_patch.end();
1255            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
1256
1257            // All variables and interface block members in the Input or Output storage classes must be decorated with either
1258            // a builtin or an explicit location.
1259            //
1260            // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
1261            // this path for the interface block case, as the individual members of the type are decorated, rather than
1262            // variable declarations.
1263
1264            if (location != -1) {
1265                // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
1266                // one result for each.
1267                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1268                for (unsigned int offset = 0; offset < num_locations; offset++) {
1269                    interface_var v = {};
1270                    v.id = id;
1271                    v.type_id = type;
1272                    v.offset = offset;
1273                    v.is_patch = is_patch;
1274                    v.is_relaxed_precision = is_relaxed_precision;
1275                    out[std::make_pair(location + offset, component)] = v;
1276                }
1277            } else if (builtin == -1) {
1278                // An interface block instance
1279                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1280            }
1281        }
1282    }
1283
1284    return out;
1285}
1286
1287static vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1288    shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
1289    std::vector<std::pair<uint32_t, interface_var>> out;
1290
1291    for (auto insn : *src) {
1292        if (insn.opcode() == spv::OpDecorate) {
1293            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1294                auto attachment_index = insn.word(3);
1295                auto id = insn.word(1);
1296
1297                if (accessible_ids.count(id)) {
1298                    auto def = src->get_def(id);
1299                    assert(def != src->end());
1300
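                    // Check the variable's storage class from its definition (def.word(3)); the decoration's word(3) is the
                    // attachment index literal, not a storage class.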
1301                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1302                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1303                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1304                            interface_var v = {};
1305                            v.id = id;
1306                            v.type_id = def.word(1);
1307                            v.offset = offset;
1308                            out.emplace_back(attachment_index + offset, v);
1309                        }
1310                    }
1311                }
1312            }
1313        }
1314    }
1315
1316    return out;
1317}
1318
1319static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1320    debug_report_data *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
1321    std::unordered_map<unsigned, unsigned> var_sets;
1322    std::unordered_map<unsigned, unsigned> var_bindings;
1323
1324    for (auto insn : *src) {
1325        // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1326        // DecorationDescriptorSet and DecorationBinding.
1327        if (insn.opcode() == spv::OpDecorate) {
1328            if (insn.word(2) == spv::DecorationDescriptorSet) {
1329                var_sets[insn.word(1)] = insn.word(3);
1330            }
1331
1332            if (insn.word(2) == spv::DecorationBinding) {
1333                var_bindings[insn.word(1)] = insn.word(3);
1334            }
1335        }
1336    }
1337
1338    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1339
1340    for (auto id : accessible_ids) {
1341        auto insn = src->get_def(id);
1342        assert(insn != src->end());
1343
1344        if (insn.opcode() == spv::OpVariable &&
1345            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
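            // Undecorated set/binding default to 0 here; a conforming module decorates every resource variable with both.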
1346            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1347            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1348
1349            interface_var v = {};
1350            v.id = insn.word(2);
1351            v.type_id = insn.word(1);
1352            out.emplace_back(std::make_pair(set, binding), v);
1353        }
1354    }
1355
1356    return out;
1357}
1358
1359static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1360                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1361                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1362                                              shader_stage_attributes const *consumer_stage) {
1363    bool pass = true;
1364
1365    auto outputs =
1366        collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1367    auto inputs =
1368        collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1369
1370    auto a_it = outputs.begin();
1371    auto b_it = inputs.begin();
1372
1373    // Maps sorted by key (location); walk them together to find mismatches
1374    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1375        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1376        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1377        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1378        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
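        // The (0,0) sentinels are never compared when the corresponding *_at_end flag is set; they only keep the types uniform.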
1379
1380        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1381            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1382                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1383                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1384                        a_first.second, consumer_stage->name)) {
1385                pass = false;
1386            }
1387            a_it++;
1388        } else if (a_at_end || a_first > b_first) {
1389            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1390                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "%s consumes input location %u.%u which is not written by %s",
1391                        consumer_stage->name, b_first.first, b_first.second, producer_stage->name)) {
1392                pass = false;
1393            }
1394            b_it++;
1395        } else {
1396            // subtleties of arrayed interfaces:
1397            // - if is_patch, then the member is not arrayed, even though the interface may be.
1398            // - if is_block_member, then the extra array level of an arrayed interface is not
1399            //   expressed in the member type -- it's expressed in the block type.
1400            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1401                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1402                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, true)) {
1403                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1404                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1405                            a_first.first, a_first.second, describe_type(producer, a_it->second.type_id).c_str(),
1406                            describe_type(consumer, b_it->second.type_id).c_str())) {
1407                    pass = false;
1408                }
1409            }
1410            if (a_it->second.is_patch != b_it->second.is_patch) {
1411                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1412                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1413                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1414                            "per-%s in %s stage",
1415                            a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1416                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1417                    pass = false;
1418                }
1419            }
1420            if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
1421                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1422                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1423                            "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", a_first.first,
1424                            a_first.second, producer_stage->name, consumer_stage->name)) {
1425                    pass = false;
1426                }
1427            }
1428            a_it++;
1429            b_it++;
1430        }
1431    }
1432
1433    return pass;
1434}
1435
1436enum FORMAT_TYPE {
1437    FORMAT_TYPE_FLOAT = 1,  // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
1438    FORMAT_TYPE_SINT = 2,
1439    FORMAT_TYPE_UINT = 4,
1440};
1441
1442static unsigned get_format_type(VkFormat fmt) {
1443    if (FormatIsSInt(fmt))
1444        return FORMAT_TYPE_SINT;
1445    if (FormatIsUInt(fmt))
1446        return FORMAT_TYPE_UINT;
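    // Depth/stencil formats: the depth aspect reads as float and the stencil aspect as uint, so either fundamental type matches.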
1447    if (FormatIsDepthAndStencil(fmt))
1448        return FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT;
1449    if (fmt == VK_FORMAT_UNDEFINED)
1450        return 0;
1451    // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader.
1452    return FORMAT_TYPE_FLOAT;
1453}
1454
1455// Characterizes a SPIR-V type appearing in an interface to a fixed-function (FF) stage, for comparison to a VkFormat's characterization above.
1456static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1457    auto insn = src->get_def(type);
1458    assert(insn != src->end());
1459
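    // Recurse through pointer and composite types down to the underlying scalar component type.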
1460    switch (insn.opcode()) {
1461        case spv::OpTypeInt:
1462            return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1463        case spv::OpTypeFloat:
1464            return FORMAT_TYPE_FLOAT;
1465        case spv::OpTypeVector:
1466            return get_fundamental_type(src, insn.word(2));
1467        case spv::OpTypeMatrix:
1468            return get_fundamental_type(src, insn.word(2));
1469        case spv::OpTypeArray:
1470            return get_fundamental_type(src, insn.word(2));
1471        case spv::OpTypePointer:
1472            return get_fundamental_type(src, insn.word(3));
1473        case spv::OpTypeImage:
1474            return get_fundamental_type(src, insn.word(2));
1475
1476        default:
1477            return 0;
1478    }
1479}
1480
1481static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
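    // u_ffs returns the 1-based index of the lowest set bit; a stage flag is a single bit, so this yields a 0-based stage index.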
1482    uint32_t bit_pos = uint32_t(u_ffs(stage));
1483    return bit_pos - 1;
1484}
1485
1486static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1487    // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
1488    // be specified only once.
1489    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1490    bool pass = true;
1491
1492    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1493        auto desc = &vi->pVertexBindingDescriptions[i];
1494        auto &binding = bindings[desc->binding];
1495        if (binding) {
1496            // TODO: VALIDATION_ERROR_02105 perhaps?
1497            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1498                        SHADER_CHECKER_INCONSISTENT_VI, "SC", "Duplicate vertex input binding descriptions for binding %d",
1499                        desc->binding)) {
1500                pass = false;
1501            }
1502        } else {
1503            binding = desc;
1504        }
1505    }
1506
1507    return pass;
1508}
1509
1510static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1511                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1512    bool pass = true;
1513
1514    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1515
1516    // Build index by location; an attribute whose format consumes multiple locations is registered at each location it occupies.
1517    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1518    if (vi) {
1519        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1520            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1521            for (auto j = 0u; j < num_locations; j++) {
1522                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1523            }
1524        }
1525    }
1526
1527    auto it_a = attribs.begin();
1528    auto it_b = inputs.begin();
1529    bool used = false;
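    // 'used' records whether the current attribute matched any shader input; it suppresses the not-consumed warning when the
    // attribute's locations advance past the inputs.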
1530
1531    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1532        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1533        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1534        auto a_first = a_at_end ? 0 : it_a->first;
1535        auto b_first = b_at_end ? 0 : it_b->first.first;
1536        if (!a_at_end && (b_at_end || a_first < b_first)) {
1537            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
1538                                 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1539                                 "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
1540                pass = false;
1541            }
1542            used = false;
1543            it_a++;
1544        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1545            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1546                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d which is not provided",
1547                        b_first)) {
1548                pass = false;
1549            }
1550            it_b++;
1551        } else {
1552            unsigned attrib_type = get_format_type(it_a->second->format);
1553            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1554
1555            // Type checking: FORMAT_TYPE_* values are bit flags, so require the fundamental types to intersect rather than match exactly.
1556            if (!(attrib_type & input_type)) {
1557                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1558                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1559                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1560                            string_VkFormat(it_a->second->format), a_first, describe_type(vs, it_b->second.type_id).c_str())) {
1561                    pass = false;
1562                }
1563            }
1564
1565            // OK!
1566            used = true;
1567            it_b++;
1568        }
1569    }
1570
1571    return pass;
1572}
1573
1574static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1575                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1576                                                    uint32_t subpass_index) {
1577    std::map<uint32_t, VkFormat> color_attachments;
1578    auto subpass = rpci->pSubpasses[subpass_index];
1579    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1580        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1581        if (attachment == VK_ATTACHMENT_UNUSED) continue;
1582        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1583            color_attachments[i] = rpci->pAttachments[attachment].format;
1584        }
1585    }
1586
1587    bool pass = true;
1588
1589    // TODO: dual source blend index (spv::DecorationIndex, zero if not provided)
1590
1591    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1592
1593    auto it_a = outputs.begin();
1594    auto it_b = color_attachments.begin();
1595
1596    // Walk attachment list and outputs together
1597
1598    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1599        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1600        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1601
1602        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1603            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1604                        SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1605                        "Fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
1606                pass = false;
1607            }
1608            it_a++;
1609        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1610            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1611                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader", it_b->first)) {
1612                pass = false;
1613            }
1614            it_b++;
1615        } else {
1616            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1617            unsigned att_type = get_format_type(it_b->second);
1618
1619            // Type checking: as above, test the FORMAT_TYPE_* bit flags for intersection.
1620            if (!(output_type & att_type)) {
1621                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1622                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1623                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
1624                            string_VkFormat(it_b->second), describe_type(fs, it_a->second.type_id).c_str())) {
1625                    pass = false;
1626                }
1627            }
1628
1629            // OK!
1630            it_a++;
1631            it_b++;
1632        }
1633    }
1634
1635    return pass;
1636}
1637
1638// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
1639// important for identifying the set of shader resources actually used by an entrypoint, for example.
1640// Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
1641//  - NOT the shader input/output interfaces.
1642//
1643// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1644// converting parts of this to be generated from the machine-readable spec instead.
1645static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1646    std::unordered_set<uint32_t> ids;
1647    std::unordered_set<uint32_t> worklist;
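    // Seed the worklist with the entrypoint's function <id> (OpEntryPoint word 2) and walk the static call tree from there.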
1648    worklist.insert(entrypoint.word(2));
1649
1650    while (!worklist.empty()) {
1651        auto id_iter = worklist.begin();
1652        auto id = *id_iter;
1653        worklist.erase(id_iter);
1654
1655        auto insn = src->get_def(id);
1656        if (insn == src->end()) {
1657            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
1658            // that we may not care about.
1659            continue;
1660        }
1661
1662        // Try to add to the output set
1663        if (!ids.insert(id).second) {
1664            continue;  // If we already saw this id, we don't want to walk it again.
1665        }
1666
1667        switch (insn.opcode()) {
1668            case spv::OpFunction:
1669                // Scan whole body of the function, enlisting anything interesting
1670                while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1671                    switch (insn.opcode()) {
1672                        case spv::OpLoad:
1673                        case spv::OpAtomicLoad:
1674                        case spv::OpAtomicExchange:
1675                        case spv::OpAtomicCompareExchange:
1676                        case spv::OpAtomicCompareExchangeWeak:
1677                        case spv::OpAtomicIIncrement:
1678                        case spv::OpAtomicIDecrement:
1679                        case spv::OpAtomicIAdd:
1680                        case spv::OpAtomicISub:
1681                        case spv::OpAtomicSMin:
1682                        case spv::OpAtomicUMin:
1683                        case spv::OpAtomicSMax:
1684                        case spv::OpAtomicUMax:
1685                        case spv::OpAtomicAnd:
1686                        case spv::OpAtomicOr:
1687                        case spv::OpAtomicXor:
1688                            worklist.insert(insn.word(3));  // ptr
1689                            break;
1690                        case spv::OpStore:
1691                        case spv::OpAtomicStore:
1692                            worklist.insert(insn.word(1));  // ptr
1693                            break;
1694                        case spv::OpAccessChain:
1695                        case spv::OpInBoundsAccessChain:
1696                            worklist.insert(insn.word(3));  // base ptr
1697                            break;
1698                        case spv::OpSampledImage:
1699                        case spv::OpImageSampleImplicitLod:
1700                        case spv::OpImageSampleExplicitLod:
1701                        case spv::OpImageSampleDrefImplicitLod:
1702                        case spv::OpImageSampleDrefExplicitLod:
1703                        case spv::OpImageSampleProjImplicitLod:
1704                        case spv::OpImageSampleProjExplicitLod:
1705                        case spv::OpImageSampleProjDrefImplicitLod:
1706                        case spv::OpImageSampleProjDrefExplicitLod:
1707                        case spv::OpImageFetch:
1708                        case spv::OpImageGather:
1709                        case spv::OpImageDrefGather:
1710                        case spv::OpImageRead:
1711                        case spv::OpImage:
1712                        case spv::OpImageQueryFormat:
1713                        case spv::OpImageQueryOrder:
1714                        case spv::OpImageQuerySizeLod:
1715                        case spv::OpImageQuerySize:
1716                        case spv::OpImageQueryLod:
1717                        case spv::OpImageQueryLevels:
1718                        case spv::OpImageQuerySamples:
1719                        case spv::OpImageSparseSampleImplicitLod:
1720                        case spv::OpImageSparseSampleExplicitLod:
1721                        case spv::OpImageSparseSampleDrefImplicitLod:
1722                        case spv::OpImageSparseSampleDrefExplicitLod:
1723                        case spv::OpImageSparseSampleProjImplicitLod:
1724                        case spv::OpImageSparseSampleProjExplicitLod:
1725                        case spv::OpImageSparseSampleProjDrefImplicitLod:
1726                        case spv::OpImageSparseSampleProjDrefExplicitLod:
1727                        case spv::OpImageSparseFetch:
1728                        case spv::OpImageSparseGather:
1729                        case spv::OpImageSparseDrefGather:
1730                        case spv::OpImageTexelPointer:
1731                            worklist.insert(insn.word(3));  // Image or sampled image
1732                            break;
1733                        case spv::OpImageWrite:
1734                            worklist.insert(insn.word(1));  // Image -- different operand order from above
1735                            break;
1736                        case spv::OpFunctionCall:
1737                            for (uint32_t i = 3; i < insn.len(); i++) {
1738                                worklist.insert(insn.word(i));  // fn itself, and all args
1739                            }
1740                            break;
1741
1742                        case spv::OpExtInst:
1743                            for (uint32_t i = 5; i < insn.len(); i++) {
1744                                worklist.insert(insn.word(i));  // Operands to ext inst
1745                            }
1746                            break;
1747                    }
1748                }
1749                break;
1750        }
1751    }
1752
1753    return ids;
1754}
1755
1756static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1757                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
1758                                                          shader_module const *src, spirv_inst_iter type,
1759                                                          VkShaderStageFlagBits stage) {
1760    bool pass = true;
1761
1762    // Strip off ptrs etc
1763    type = get_struct_type(src, type, false);
1764    assert(type != src->end());
1765
1766    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but is a good first step.
1767    // TODO: arrays, matrices, weird sizes
1768    for (auto insn : *src) {
1769        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1770            if (insn.word(3) == spv::DecorationOffset) {
1771                unsigned offset = insn.word(4);
1772                auto size = 4;  // Bytes; TODO: calculate this based on the type
1773
1774                bool found_range = false;
1775                for (auto const &range : *push_constant_ranges) {
1776                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
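                    // The member is covered only if a single declared range contains the whole span [offset, offset + size).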
1777                        found_range = true;
1778
1779                        if ((range.stageFlags & stage) == 0) {
1780                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1781                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1782                                        "Push constant range covering variable starting at "
1783                                        "offset %u not accessible from stage %s",
1784                                        offset, string_VkShaderStageFlagBits(stage))) {
1785                                pass = false;
1786                            }
1787                        }
1788
1789                        break;
1790                    }
1791                }
1792
1793                if (!found_range) {
1794                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1795                                SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1796                                "Push constant range covering variable starting at "
1797                                "offset %u not declared in layout",
1798                                offset)) {
1799                        pass = false;
1800                    }
1801                }
1802            }
1803        }
1804    }
1805
1806    return pass;
1807}
1808
1809static bool validate_push_constant_usage(debug_report_data *report_data,
1810                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
1811                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1812    bool pass = true;
1813
1814    for (auto id : accessible_ids) {
1815        auto def_insn = src->get_def(id);
1816        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1817            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
1818                                                                  src->get_def(def_insn.word(1)), stage);
1819        }
1820    }
1821
1822    return pass;
1823}
1824
1825// For given pipelineLayout verify that the set_layout_node at slot.first
1826//  has the requested binding at slot.second and return ptr to that binding
1827static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout,
1828                                                                  descriptor_slot_t slot) {
1829    if (!pipelineLayout) return nullptr;
1830
1831    if (slot.first >= pipelineLayout->set_layouts.size()) return nullptr;
1832
1833    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
1834}
1835
1836// Check object status for selected flag state
1837static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
1838                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
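    // Only flag an error when the required status bit is unset; the result of log_msg decides whether this counts as a failure.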
1839    if (!(pNode->status & status_mask)) {
1840        char const *const message = validation_error_map[msg_code];
1841        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1842                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
1843                       "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
1844    }
1845    return false;
1846}
1847
1848// Retrieve pipeline node ptr for given pipeline object
1849static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
1850    auto it = dev_data->pipelineMap.find(pipeline);
1851    if (it == dev_data->pipelineMap.end()) {
1852        return nullptr;
1853    }
1854    return it->second;
1855}
1856
1857RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
1858    auto it = dev_data->renderPassMap.find(renderpass);
1859    if (it == dev_data->renderPassMap.end()) {
1860        return nullptr;
1861    }
1862    return it->second.get();
1863}
1864
1865FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
1866    auto it = dev_data->frameBufferMap.find(framebuffer);
1867    if (it == dev_data->frameBufferMap.end()) {
1868        return nullptr;
1869    }
1870    return it->second.get();
1871}
1872
1873cvdescriptorset::DescriptorSetLayout const *GetDescriptorSetLayout(layer_data const *dev_data, VkDescriptorSetLayout dsLayout) {
1874    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
1875    if (it == dev_data->descriptorSetLayoutMap.end()) {
1876        return nullptr;
1877    }
1878    return it->second;
1879}
1880
1881static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
1882    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
1883    if (it == dev_data->pipelineLayoutMap.end()) {
1884        return nullptr;
1885    }
1886    return &it->second;
1887}
1888
1889// Return true if for a given PSO, the given state enum is dynamic, else return false
1890static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
1891    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
1892        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
1893            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
1894        }
1895    }
1896    return false;
1897}
1898
1899// Validate state stored as flags at time of draw call
1900static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
1901                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1902    bool result = false;
1903    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
1904        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
1905         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
1906        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1907                                  "Dynamic line width state not set for this command buffer", msg_code);
1908    }
1909    if (pPipe->graphicsPipelineCI.pRasterizationState &&
1910        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
1911        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1912                                  "Dynamic depth bias state not set for this command buffer", msg_code);
1913    }
1914    if (pPipe->blendConstantsEnabled) {
1915        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1916                                  "Dynamic blend constants state not set for this command buffer", msg_code);
1917    }
1918    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
1919        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
1920        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1921                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
1922    }
1923    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
1924        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
1925        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1926                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
1927        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1928                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
1929        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1930                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
1931    }
1932    if (indexed) {
1933        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1934                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
1935    }
1936
1937    return result;
1938}
1939
1940// Verify attachment reference compatibility according to spec
1941//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
1942//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
1943//   to make sure that format and samples counts match.
1944//  If not, they are not compatible.
1945static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
1946                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
1947                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
1948                                             const VkAttachmentDescription *pSecondaryAttachments) {
1949    // Check potential NULL cases first to avoid nullptr issues later
1950    if (pPrimary == nullptr) {
1951        if (pSecondary == nullptr) {
1952            return true;
1953        }
1954        return false;
1955    } else if (pSecondary == nullptr) {
1956        return false;
1957    }
1958    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
1959        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
1960    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
1961        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
1962    } else {  // Format and sample count must match
1963        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
1964            return true;
1965        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
1966            return false;
1967        }
1968        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
1969             pSecondaryAttachments[pSecondary[index].attachment].format) &&
1970            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
1971             pSecondaryAttachments[pSecondary[index].attachment].samples))
1972            return true;
1973    }
1974    // Format and sample counts didn't match
1975    return false;
1976}
1977// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
1978// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
1979static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
1980                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
1981    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
1982        stringstream errorStr;
1983        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
1984                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
1985        errorMsg = errorStr.str();
1986        return false;
1987    }
1988    uint32_t spIndex = 0;
1989    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
1990        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
1991        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
1992        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
1993        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
1994        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
1995            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
1996                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
1997                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
1998                stringstream errorStr;
1999                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2000                errorMsg = errorStr.str();
2001                return false;
2002            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2003                                                         primaryColorCount, primaryRPCI->pAttachments,
2004                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2005                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2006                stringstream errorStr;
2007                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2008                errorMsg = errorStr.str();
2009                return false;
2010            }
2011        }
2012
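        // Depth/stencil is a single attachment reference; treat it as a one-element array for the compatibility check.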
2013        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
2014                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2015                                              1, secondaryRPCI->pAttachments)) {
2016            stringstream errorStr;
2017            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2018            errorMsg = errorStr.str();
2019            return false;
2020        }
2021
2022        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2023        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2024        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2025        for (uint32_t i = 0; i < inputMax; ++i) {
2026            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2027                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2028                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2029                stringstream errorStr;
2030                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2031                errorMsg = errorStr.str();
2032                return false;
2033            }
2034        }
2035    }
2036    return true;
2037}
2038
2039// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2040// pipelineLayout[layoutIndex]
2041static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
2042                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2043                                            string &errorMsg) {
2044    auto num_sets = pipeline_layout->set_layouts.size();
2045    if (layoutIndex >= num_sets) {
2046        stringstream errorStr;
2047        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2048                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2049                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind a set at index "
2050        errorMsg = errorStr.str();
2051        return false;
2052    }
2053    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2054    return descriptor_set->IsCompatible(layout_node, &errorMsg);
2055}
2056
2057// Validate that data for each specialization entry is fully contained within the buffer.
2058static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2059    bool pass = true;
2060
2061    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2062
2063    if (spec) {
2064        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2065            // TODO: This is a good place for VALIDATION_ERROR_00589.
2066            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2067                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2068                            VALIDATION_ERROR_00590, "SC",
2069                            "Specialization entry %u (for constant id %u) references memory outside provided "
2070                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2071                            " bytes provided). %s.",
2072                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2073                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
2074                            validation_error_map[VALIDATION_ERROR_00590])) {
2075                    pass = false;
2076                }
2077            }
2078        }
2079    }
2080
2081    return pass;
2082}
2083
2084static bool descriptor_type_match(shader_module const *module, uint32_t type_id, VkDescriptorType descriptor_type,
2085                                  unsigned &descriptor_count) {
2086    auto type = module->get_def(type_id);
2087
2088    descriptor_count = 1;
2089
2090    // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
2091    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
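        // OpTypeArray: word(2) = element type <id>, word(3) = length (a constant <id>). OpTypePointer: word(3) = pointee type <id>.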
2092        if (type.opcode() == spv::OpTypeArray) {
2093            descriptor_count *= get_constant_value(module, type.word(3));
2094            type = module->get_def(type.word(2));
2095        } else {
2096            type = module->get_def(type.word(3));
2097        }
2098    }
2099
2100    switch (type.opcode()) {
2101        case spv::OpTypeStruct: {
2102            for (auto insn : *module) {
2103                if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2104                    if (insn.word(2) == spv::DecorationBlock) {
2105                        return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2106                               descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2107                    } else if (insn.word(2) == spv::DecorationBufferBlock) {
2108                        return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2109                               descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2110                    }
2111                }
2112            }
2113
2114            // Invalid
2115            return false;
2116        }
2117
2118        case spv::OpTypeSampler:
2119            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER || descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2120
2121        case spv::OpTypeSampledImage:
2122            if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2123                // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
2124                // buffer descriptor doesn't really provide one. Allow this slight mismatch.
2125                auto image_type = module->get_def(type.word(2));
2126                auto dim = image_type.word(3);
2127                auto sampled = image_type.word(7);
2128                return dim == spv::DimBuffer && sampled == 1;
2129            }
2130            return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2131
2132        case spv::OpTypeImage: {
2133            // Many descriptor types can back image types -- which one depends on the dimension and whether the image will be used with a sampler.
2134            // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
2135            auto dim = type.word(3);
2136            auto sampled = type.word(7);
2137
2138            if (dim == spv::DimSubpassData) {
2139                return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2140            } else if (dim == spv::DimBuffer) {
2141                if (sampled == 1) {
2142                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2143                } else {
2144                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2145                }
2146            } else if (sampled == 1) {
2147                return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2148                       descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2149            } else {
2150                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2151            }
2152        }
2153
2154        // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
2155        default:
2156            return false;  // Mismatch
2157    }
2158}
2159
2160static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2161    if (!feature) {
2162        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2163                    SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2164                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2165                    "enabled on the device",
2166                    feature_name)) {
2167            return false;
2168        }
2169    }
2170
2171    return true;
2172}
2173
2174static bool require_extension(debug_report_data *report_data, bool extension, char const *extension_name) {
2175    if (!extension) {
2176        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2177                    SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2178                    "Shader requires extension %s but it is not "
2179                    "enabled on the device",
2180                    extension_name)) {
2181            return false;
2182        }
2183    }
2184
2185    return true;
2186}
2187
2188static bool validate_shader_capabilities(layer_data *dev_data, shader_module const *src) {
2189    bool pass = true;
2190
2191    auto report_data = dev_data->report_data;
2192    auto const &enabledFeatures = dev_data->enabled_features;
2193
2194    for (auto insn : *src) {
2195        if (insn.opcode() == spv::OpCapability) {
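            // OpCapability: word(1) is the capability enum. Map each capability to the device feature or extension it requires.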
2196            switch (insn.word(1)) {
2197                case spv::CapabilityMatrix:
2198                case spv::CapabilityShader:
2199                case spv::CapabilityInputAttachment:
2200                case spv::CapabilitySampled1D:
2201                case spv::CapabilityImage1D:
2202                case spv::CapabilitySampledBuffer:
2203                case spv::CapabilityImageBuffer:
2204                case spv::CapabilityImageQuery:
2205                case spv::CapabilityDerivativeControl:
2206                    // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2207                    break;
2208
2209                case spv::CapabilityGeometry:
2210                    pass &= require_feature(report_data, enabledFeatures.geometryShader, "geometryShader");
2211                    break;
2212
2213                case spv::CapabilityTessellation:
2214                    pass &= require_feature(report_data, enabledFeatures.tessellationShader, "tessellationShader");
2215                    break;
2216
2217                case spv::CapabilityFloat64:
2218                    pass &= require_feature(report_data, enabledFeatures.shaderFloat64, "shaderFloat64");
2219                    break;
2220
2221                case spv::CapabilityInt64:
2222                    pass &= require_feature(report_data, enabledFeatures.shaderInt64, "shaderInt64");
2223                    break;
2224
2225                case spv::CapabilityTessellationPointSize:
2226                case spv::CapabilityGeometryPointSize:
2227                    pass &= require_feature(report_data, enabledFeatures.shaderTessellationAndGeometryPointSize,
2228                                            "shaderTessellationAndGeometryPointSize");
2229                    break;
2230
2231                case spv::CapabilityImageGatherExtended:
                    pass &= require_feature(report_data, enabledFeatures.shaderImageGatherExtended, "shaderImageGatherExtended");
                    break;

                case spv::CapabilityStorageImageMultisample:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageImageMultisample,
                                            "shaderStorageImageMultisample");
                    break;

                case spv::CapabilityUniformBufferArrayDynamicIndexing:
                    pass &= require_feature(report_data, enabledFeatures.shaderUniformBufferArrayDynamicIndexing,
                                            "shaderUniformBufferArrayDynamicIndexing");
                    break;

                case spv::CapabilitySampledImageArrayDynamicIndexing:
                    pass &= require_feature(report_data, enabledFeatures.shaderSampledImageArrayDynamicIndexing,
                                            "shaderSampledImageArrayDynamicIndexing");
                    break;

                case spv::CapabilityStorageBufferArrayDynamicIndexing:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageBufferArrayDynamicIndexing,
                                            "shaderStorageBufferArrayDynamicIndexing");
                    break;

                case spv::CapabilityStorageImageArrayDynamicIndexing:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageImageArrayDynamicIndexing,
                                            "shaderStorageImageArrayDynamicIndexing");
                    break;

                case spv::CapabilityClipDistance:
                    pass &= require_feature(report_data, enabledFeatures.shaderClipDistance, "shaderClipDistance");
                    break;

                case spv::CapabilityCullDistance:
                    pass &= require_feature(report_data, enabledFeatures.shaderCullDistance, "shaderCullDistance");
                    break;

                case spv::CapabilityImageCubeArray:
                    pass &= require_feature(report_data, enabledFeatures.imageCubeArray, "imageCubeArray");
                    break;

                case spv::CapabilitySampleRateShading:
                    pass &= require_feature(report_data, enabledFeatures.sampleRateShading, "sampleRateShading");
                    break;

                case spv::CapabilitySparseResidency:
                    pass &= require_feature(report_data, enabledFeatures.shaderResourceResidency, "shaderResourceResidency");
                    break;

                case spv::CapabilityMinLod:
                    pass &= require_feature(report_data, enabledFeatures.shaderResourceMinLod, "shaderResourceMinLod");
                    break;

                case spv::CapabilitySampledCubeArray:
                    pass &= require_feature(report_data, enabledFeatures.imageCubeArray, "imageCubeArray");
                    break;

                case spv::CapabilityImageMSArray:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageImageMultisample,
                                            "shaderStorageImageMultisample");
                    break;

                case spv::CapabilityStorageImageExtendedFormats:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageImageExtendedFormats,
                                            "shaderStorageImageExtendedFormats");
                    break;

                case spv::CapabilityInterpolationFunction:
                    pass &= require_feature(report_data, enabledFeatures.sampleRateShading, "sampleRateShading");
                    break;

                case spv::CapabilityStorageImageReadWithoutFormat:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageImageReadWithoutFormat,
                                            "shaderStorageImageReadWithoutFormat");
                    break;

                case spv::CapabilityStorageImageWriteWithoutFormat:
                    pass &= require_feature(report_data, enabledFeatures.shaderStorageImageWriteWithoutFormat,
                                            "shaderStorageImageWriteWithoutFormat");
                    break;

                case spv::CapabilityMultiViewport:
                    pass &= require_feature(report_data, enabledFeatures.multiViewport, "multiViewport");
                    break;

                case spv::CapabilityDrawParameters:
                    pass &= require_extension(report_data, dev_data->device_extensions.khr_shader_draw_parameters_enabled,
                                              VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME);
                    break;

                case spv::CapabilityGeometryShaderPassthroughNV:
                    pass &= require_extension(report_data, dev_data->device_extensions.nv_geometry_shader_passthrough_enabled,
                                              VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME);
                    break;

                case spv::CapabilitySampleMaskOverrideCoverageNV:
                    pass &= require_extension(report_data, dev_data->device_extensions.nv_sample_mask_override_coverage_enabled,
                                              VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME);
                    break;

                case spv::CapabilityShaderViewportIndexLayerNV:
                case spv::CapabilityShaderViewportMaskNV:
                    pass &= require_extension(report_data, dev_data->device_extensions.nv_viewport_array2_enabled,
                                              VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME);
                    break;

                case spv::CapabilitySubgroupBallotKHR:
                    pass &= require_extension(report_data, dev_data->device_extensions.khr_subgroup_ballot_enabled,
                                              VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME);
                    break;

                case spv::CapabilitySubgroupVoteKHR:
                    pass &= require_extension(report_data, dev_data->device_extensions.khr_subgroup_vote_enabled,
                                              VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME);
                    break;

                default:
                    // The SPIR-V validator should catch these errors
                    break;
            }
        }
    }

    return pass;
}
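
// Illustrative sketch (not part of the layer, hypothetical values): the checks above
// only pass for a shader declaring, e.g., `OpCapability SampledCubeArray` when the
// matching feature was enabled at device creation time:
//
//     VkPhysicalDeviceFeatures features = {};  // assume the device supports the feature
//     features.imageCubeArray = VK_TRUE;
//     VkDeviceCreateInfo create_info = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
//     create_info.pEnabledFeatures = &features;
//     // ... vkCreateDevice(gpu, &create_info, nullptr, &device);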

static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
    auto type = module->get_def(type_id);

    while (true) {
        switch (type.opcode()) {
            case spv::OpTypeArray:
            case spv::OpTypeSampledImage:
                type = module->get_def(type.word(2));
                break;
            case spv::OpTypePointer:
                type = module->get_def(type.word(3));
                break;
            case spv::OpTypeImage: {
                auto dim = type.word(3);
                auto arrayed = type.word(5);
                auto msaa = type.word(6);

                switch (dim) {
                    case spv::Dim1D:
                        return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
                    case spv::Dim2D:
                        return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
                               (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
                    case spv::Dim3D:
                        return DESCRIPTOR_REQ_VIEW_TYPE_3D;
                    case spv::DimCube:
                        return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
                    case spv::DimSubpassData:
                        return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
                    default:  // buffer, etc.
                        return 0;
                }
            }
            default:
                return 0;
        }
    }
}
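
// Worked example (illustrative): for a combined image sampler declared in GLSL as
// `layout(set = 0, binding = 0) uniform sampler2DArray tex;`, the SPIR-V type chain is
// OpTypePointer -> OpTypeSampledImage -> OpTypeImage (Dim2D, arrayed = 1, ms = 0), so
// the walk above returns DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY,
// which at draw time requires a VK_IMAGE_VIEW_TYPE_2D_ARRAY view bound to that slot.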

static bool validate_pipeline_shader_stage(
    layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage, PIPELINE_STATE *pipeline,
    shader_module **out_module, spirv_inst_iter *out_entrypoint) {
    bool pass = true;
    auto module_it = dev_data->shaderModuleMap.find(pStage->module);
    auto module = *out_module = module_it->second.get();
    auto report_data = dev_data->report_data;

    if (!module->has_valid_spirv) return pass;

    // Find the entrypoint
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_00510, "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510])) {
            return false;  // no point continuing beyond here, any analysis is just going to be garbage.
        }
    }

    // Validate shader capabilities against enabled device features
    pass &= validate_shader_capabilities(dev_data, module);

    // Mark accessible ids
    auto accessible_ids = mark_accessible_ids(module, entrypoint);

    // Validate descriptor set layout against what the entrypoint actually uses
    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);

    auto pipelineLayout = pipeline->pipeline_layout;

    pass &= validate_specialization_offsets(report_data, pStage);
    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);

    // Validate descriptor use
    for (auto use : descriptor_uses) {
        // While validating shaders capture which slots are used by the pipeline
        auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));

        // Verify given pipelineLayout has requested setLayout with requested binding
        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType, required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Type mismatch on descriptor slot "
                        "%u.%u (used as type `%s`) but "
                        "descriptor of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    // Validate use of input attachments against subpass structure
    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
        auto input_attachment_uses = collect_interface_by_input_attachment_index(module, accessible_ids);

        auto rpci = pipeline->render_pass_ci.ptr();
        auto subpass = pipeline->graphicsPipelineCI.subpass;

        for (auto use : input_attachment_uses) {
            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount)
                             ? input_attachments[use.first].attachment
                             : VK_ATTACHMENT_UNUSED;

            if (index == VK_ATTACHMENT_UNUSED) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
                            "Shader consumes input attachment index %d but not provided in subpass", use.first)) {
                    pass = false;
                }
            } else if (!(get_format_type(rpci->pAttachments[index].format) & get_fundamental_type(module, use.second.type_id))) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
                            "Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first,
                            string_VkFormat(rpci->pAttachments[index].format), describe_type(module, use.second.type_id).c_str())) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}
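
// Illustrative sketch of the (set, binding) pairing checked above (hypothetical shader
// and layout): a fragment shader using `layout(set = 0, binding = 1) uniform sampler2D s;`
// is only satisfied by a pipeline layout whose set-0 layout contains a compatible binding:
//
//     VkDescriptorSetLayoutBinding b = {};
//     b.binding = 1;
//     b.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     b.descriptorCount = 1;
//     b.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;  // must include the using stage
//
// A missing binding, a stageFlags mismatch, a descriptorType mismatch, or too small a
// descriptorCount each produce one of the SHADER_CHECKER_* errors logged above.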

// Validate the shaders used by the given pipeline and store the active_slots
//  that are actually used by the pipeline into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(layer_data *dev_data, PIPELINE_STATE *pPipeline) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(dev_data, pStage, pPipeline, &shaders[stage_id], &entrypoints[stage_id]);
    }

    // If the shader stages are not valid individually, cross-stage validation is pointless.
    if (!pass) return false;

    auto vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(dev_data->report_data, vi);
    }

    if (shaders[vertex_stage] && shaders[vertex_stage]->has_valid_spirv) {
        pass &= validate_vi_against_vs_inputs(dev_data->report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer] && shaders[consumer]->has_valid_spirv && shaders[producer]->has_valid_spirv) {
            pass &= validate_interface_between_stages(dev_data->report_data, shaders[producer], entrypoints[producer],
                                                      &shader_stage_attribs[producer], shaders[consumer], entrypoints[consumer],
                                                      &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    if (shaders[fragment_stage] && shaders[fragment_stage]->has_valid_spirv) {
        pass &= validate_fs_outputs_against_render_pass(dev_data->report_data, shaders[fragment_stage], entrypoints[fragment_stage],
                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
    }

    return pass;
}
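
// Worked example (illustrative): for a pipeline with only VS, GS, and FS stages, the
// producer/consumer walk above pairs adjacent present stages in pipeline order
// (vertex, tess control, tess eval, geometry, fragment); the absent tessellation
// stages are skipped, so the interfaces checked are VS -> GS and then GS -> FS.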

static bool validate_compute_pipeline(layer_data *dev_data, PIPELINE_STATE *pPipeline) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(dev_data, &pCreateInfo->stage, pPipeline, &module, &entrypoint);
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
    auto set_it = dev_data->setMap.find(set);
    if (set_it == dev_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
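
// Example (illustrative): list_bits(s, 0x29) writes "0,3,5" to s, since 0x29 has bits
// 0, 3, and 5 set; callers below use it to report missing viewport/scissor indices.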

// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_STATE const *pPipeline) {
    bool skip = false;

    // Verify vertex binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "The Pipeline State Object (0x%" PRIxLEAST64
                            ") expects that this Command Buffer's vertex binding Index %u "
                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                            (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCB->commandBuffer),
                            __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "Vertex buffers are bound to command buffer (0x%p"
                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                            pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }
    }
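
    // Worked example (illustrative, hypothetical values): with viewportCount == 3 the
    // required mask is (1 << 3) - 1 == 0b111; if the app only called vkCmdSetViewport()
    // for viewport 0, pCB->viewportMask == 0b001, so missingViewportMask == 0b110 and
    // the message above lists viewports "1,2".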

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;
            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH,
                                "DS", "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
                                      ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                            "DS", "At Draw time the active render pass (0x%" PRIxLEAST64
                                  ") is incompatible w/ gfx pipeline "
                                  "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                            reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                            reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                            "DS", "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                            pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip;
}

// Validate overall state at the time of a draw call
static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
                              const VkPipelineBindPoint bind_point, const char *function,
                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        result |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            reinterpret_cast<uint64_t>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return as any further checks below will be busted w/o a pipeline
        if (result) return true;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t>(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                    "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline, setIndex);
            } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
                            errorString.c_str());
            } else {  // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> active_bindings;
                for (auto binding : set_binding_pair.second) {
                    active_bindings.insert(binding.first);
                }
                // Make sure set has been updated if it has no immutable samplers
                //  If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!descriptor_set->IsUpdated()) {
                    for (auto binding : active_bindings) {
                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)descriptor_set->GetSet(),
                                              __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                              "Descriptor Set 0x%" PRIxLEAST64
                                              " bound but was never updated. It is now being used to draw so "
                                              "this will result in undefined behavior.",
                                              (uint64_t)descriptor_set->GetSet());
                        }
                    }
                }
                // Validate the draw-time state for this descriptor set
                std::string err_str;
                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], cb_node, function,
                                                       &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set),
                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                      "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s time: %s",
                                      reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);

    return result;
}

static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Bind this set and its active descriptor resources to the command buffer
            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
            // For given active slots record updated images & buffers
            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
        }
    }
    if (pPipe->vertexBindingDescriptions.size() > 0) {
        cb_state->vertex_buffer_used = true;
    }
}

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, VulkanObjectType object_type, const uint64_t &target,
                            float lineWidth) {
    bool skip = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target, __LINE__,
                        dsError, "DS",
                        "Attempt to set lineWidth to %f but physical device wideLines feature "
                        "not supported/enabled so lineWidth must be 1.0f!",
                        lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target,
                            __LINE__, dsError, "DS",
                            "Attempt to set lineWidth to %f but physical device limits line width "
                            "to between [%f, %f]!",
                            lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                            dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip;
}
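
// Example (illustrative, hypothetical limits): with wideLines disabled, any
// lineWidth != 1.0f is flagged; with wideLines enabled and
// limits.lineWidthRange == [0.5, 8.0], a requested lineWidth of 10.0f is
// rejected for being outside that range.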

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *dev_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
    bool skip = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                            "DS", "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_00518, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
                            validation_error_map[VALIDATION_ERROR_00518]);
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                            "DS", "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
        auto const render_pass_info = GetRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass)->createInfo.ptr();
        const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pPipeline->graphicsPipelineCI.subpass];
        if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02109, "DS",
                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
                reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
                validation_error_map[VALIDATION_ERROR_02109]);
        }
        if (!dev_data->enabled_features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_01532, "DS",
                                    "Invalid Pipeline CreateInfo: If independent blend feature not "
                                    "enabled, all elements of pAttachments must be identical. %s",
                                    validation_error_map[VALIDATION_ERROR_01532]);
                        break;
                    }
                }
            }
        }
        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_01533, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
                        validation_error_map[VALIDATION_ERROR_01533]);
        }
    }

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto renderPass = GetRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass);
    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02122, "DS",
                        "Invalid Pipeline CreateInfo State: Subpass index %u "
                        "is out of range for this renderpass (0..%u). %s",
                        pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
                        validation_error_map[VALIDATION_ERROR_02122]);
    }

    if (!GetDisables(dev_data)->shader_validation && !validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
        skip = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                            "DS", "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                            string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                    reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_00532, "DS",
                    "Invalid Pipeline CreateInfo State: Vertex Shader required. %s", validation_error_map[VALIDATION_ERROR_00532]);
    }
    // Either both or neither TC/TE shaders should be defined
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_00534, "DS",
                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                        validation_error_map[VALIDATION_ERROR_00534]);
    }
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_00535, "DS",
                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                        validation_error_map[VALIDATION_ERROR_00535]);
    }
    // Compute shaders should be specified independent of Gfx shaders
    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_00533, "DS",
                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
                        validation_error_map[VALIDATION_ERROR_00533]);
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02099, "DS",
                        "Invalid Pipeline CreateInfo State: "
                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                        "topology for tessellation pipelines. %s",
                        validation_error_map[VALIDATION_ERROR_02099]);
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02100, "DS",
                            "Invalid Pipeline CreateInfo State: "
                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                            "topology is only valid for tessellation pipelines. %s",
                            validation_error_map[VALIDATION_ERROR_02100]);
        }
    }

    if (pPipeline->graphicsPipelineCI.pTessellationState &&
        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
          dev_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_01426, "DS",
                        "Invalid Pipeline CreateInfo State: "
                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                        "topology used with patchControlPoints value %u."
                        " patchControlPoints should be >0 and <=%u. %s",
                        pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
                        dev_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
                        validation_error_map[VALIDATION_ERROR_01426]);
    }

    // If a rasterization state is provided...
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        // Make sure that the line width conforms to the HW.
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, kVulkanObjectTypePipeline,
                                    reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                    pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }

        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
            (!dev_data->enabled_features.depthClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_01455, "DS",
                            "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable "
                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01455]);
        }

        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
            (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
        }

        // If rasterization is enabled...
        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
            auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;

            if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
                (!dev_data->enabled_features.alphaToOne)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_01464, "DS",
                                "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
                                "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
                                validation_error_map[VALIDATION_ERROR_01464]);
            }

            // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02115, "DS",
                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is "
                                    "enabled and subpass uses a depth/stencil attachment. %s",
                                    validation_error_map[VALIDATION_ERROR_02115]);

                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
                           (!dev_data->enabled_features.depthBounds)) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                        "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the depthBoundsTestEnable "
                        "member of the VkPipelineDepthStencilStateCreateInfo structure must be set to VK_FALSE.");
                }
            }

            // If subpass uses color attachments, pColorBlendState must be valid pointer
            if (subpass_desc) {
                uint32_t color_attachment_count = 0;
                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                        ++color_attachment_count;
                    }
                }
                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    reinterpret_cast<uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02116, "DS",
                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is "
                                    "enabled and subpass uses color attachments. %s",
                                    validation_error_map[VALIDATION_ERROR_02116]);
                }
            }
        }
    }

    return skip;
}
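
// Illustrative sketch (hypothetical values): a derivative pipeline create that
// satisfies the base-pipeline checks above:
//
//     VkGraphicsPipelineCreateInfo infos[2] = { /* ... */ };
//     infos[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;   // base allows derivatives
//     infos[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     infos[1].basePipelineIndex = 0;                // base occurs earlier in the array
//     infos[1].basePipelineHandle = VK_NULL_HANDLE;  // exactly one of handle/index is set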

// Free the Pipeline nodes
static void deletePipelines(layer_data *dev_data) {
    if (dev_data->pipelineMap.empty()) return;
    for (auto &pipe_map_pair : dev_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    dev_data->pipelineMap.clear();
}

// Block of code below is specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
    bool skip = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                        (uint64_t)(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
                            "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                            func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
        }
    }
    return skip;
}
3179
3180// Remove set from setMap and delete the set
3181static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3182    dev_data->setMap.erase(descriptor_set->GetSet());
3183    delete descriptor_set;
3184}
3185// Free all DS Pools including their Sets & related sub-structs
3186// NOTE : Calls to this function should be wrapped in mutex
3187static void deletePools(layer_data *dev_data) {
3188     if (dev_data->descriptorPoolMap.empty()) return;
3189     for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end(); ++ii) {
3190         // Remove this pool's sets from setMap and delete them
3191         for (auto ds : (*ii).second->sets) {
3192             freeDescriptorSet(dev_data, ds);
3193         }
3194         (*ii).second->sets.clear();
             delete (*ii).second;  // also free the pool state itself so clearing the map below doesn't leak it
3195     }
3196    dev_data->descriptorPoolMap.clear();
3197}
3198
3199static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
3200                                VkDescriptorPoolResetFlags flags) {
3201     DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
         if (!pPool) return;  // guard against an unknown pool handle; GetDescriptorPoolState() returns NULL in that case
3202     // TODO: validate flags
3203     // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3204    for (auto ds : pPool->sets) {
3205        freeDescriptorSet(dev_data, ds);
3206    }
3207    pPool->sets.clear();
3208    // Reset available count for each type and available sets for this pool
3209    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3210        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3211    }
3212    pPool->availableSets = pPool->maxSets;
3213}
3214
3215// For given CB object, fetch associated CB Node from map
3216GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
3217    auto it = dev_data->commandBufferMap.find(cb);
3218    if (it == dev_data->commandBufferMap.end()) {
3219        return NULL;
3220    }
3221    return it->second;
3222}
3223
3224// If a renderpass is active, verify that the given command type is appropriate for current subpass state
3225bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3226    if (!pCB->activeRenderPass) return false;
3227    bool skip = false;
3228    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3229        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3230        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3231                        reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3232                        "Commands cannot be called in a subpass using secondary command buffers.");
3233    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3234        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3235                        reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3236                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3237    }
3238    return skip;
3239}
3240
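     // Check that the command buffer was allocated from a pool whose queue family supports the
     // required queue capabilities; on mismatch, an "X or Y" list of acceptable flags is built for
     // the error message. Illustrative call (the error code here is a placeholder, not one defined
     // in this file):
     //     skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdDraw()", VK_QUEUE_GRAPHICS_BIT, some_error_code);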
3241bool ValidateCmdQueueFlags(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const char *caller_name, VkQueueFlags required_flags,
3242                           UNIQUE_VALIDATION_ERROR_CODE error_code) {
3243    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
3244    if (pool) {
3245        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
3246        if (!(required_flags & queue_flags)) {
3247            string required_flags_string;
3248            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
3249                if (flag & required_flags) {
3250                    if (required_flags_string.size()) {
3251                        required_flags_string += " or ";
3252                    }
3253                    required_flags_string += string_VkQueueFlagBits(flag);
3254                }
3255            }
3256            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3257                           reinterpret_cast<uint64_t>(cb_node->commandBuffer), __LINE__, error_code, "DS",
3258                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
3259                           required_flags_string.c_str(), validation_error_map[error_code]);
3260        }
3261    }
3262    return false;
3263}
3264
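     // Emit one error per broken binding recorded on this CB, explaining that the CB is invalid
     // because a bound object was destroyed (or, for descriptor sets, destroyed or updated) after
     // being recorded into it.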
3265static bool ReportInvalidCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source) {
3266    bool skip = false;
3267    for (auto obj : cb_state->broken_bindings) {
3268        const char *type_str = object_string[obj.type];
3269        // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
3270        const char *cause_str = (obj.type == kVulkanObjectTypeDescriptorSet) ? "destroyed or updated" : "destroyed";
3271        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3272                        reinterpret_cast<uint64_t &>(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3273                        "You are adding %s to command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
3274                        call_source, cb_state->commandBuffer, type_str, obj.handle, cause_str);
3275    }
3276    return skip;
3277}
3278
3279// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
3280// there's an issue with the Cmd ordering
3281bool ValidateCmd(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
3282    switch (cb_state->state) {
3283        case CB_RECORDING:
3284            return ValidateCmdSubpassState(dev_data, cb_state, cmd);
3285
3286        case CB_INVALID:
3287            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
3288
3289        default:
3290            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3291                           reinterpret_cast<uint64_t &>(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3292                           "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3293    }
3294}
3295
3296void UpdateCmdBufferLastCmd(GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
3297    if (cb_state->state == CB_RECORDING) {
3298        cb_state->last_cmd = cmd;
3299    }
3300}
3301// For given object struct return a ptr of BASE_NODE type for its wrapping struct
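     // This generic lookup works because every state struct handled below derives from BASE_NODE,
     // which carries the shared in_use counter and cb_bindings set used by the generic
     // command-buffer binding and retirement paths.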
3302BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
3303    BASE_NODE *base_ptr = nullptr;
3304    switch (object_struct.type) {
3305        case kVulkanObjectTypeDescriptorSet: {
3306            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
3307            break;
3308        }
3309        case kVulkanObjectTypeSampler: {
3310            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
3311            break;
3312        }
3313        case kVulkanObjectTypeQueryPool: {
3314            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
3315            break;
3316        }
3317        case kVulkanObjectTypePipeline: {
3318            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
3319            break;
3320        }
3321        case kVulkanObjectTypeBuffer: {
3322            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
3323            break;
3324        }
3325        case kVulkanObjectTypeBufferView: {
3326            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
3327            break;
3328        }
3329        case kVulkanObjectTypeImage: {
3330            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
3331            break;
3332        }
3333        case kVulkanObjectTypeImageView: {
3334            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
3335            break;
3336        }
3337        case kVulkanObjectTypeEvent: {
3338            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
3339            break;
3340        }
3341        case kVulkanObjectTypeDescriptorPool: {
3342            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
3343            break;
3344        }
3345        case kVulkanObjectTypeCommandPool: {
3346            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
3347            break;
3348        }
3349        case kVulkanObjectTypeFramebuffer: {
3350            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
3351            break;
3352        }
3353        case kVulkanObjectTypeRenderPass: {
3354            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
3355            break;
3356        }
3357        case kVulkanObjectTypeDeviceMemory: {
3358            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
3359            break;
3360        }
3361        default:
3362            // TODO : Any other objects to be handled here?
3363            assert(0);
3364            break;
3365    }
3366    return base_ptr;
3367}
3368
3369// Tie the VK_OBJECT to the cmd buffer which includes:
3370//  Add object_binding to cmd buffer
3371//  Add cb_binding to object
3372static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
3373    cb_bindings->insert(cb_node);
3374    cb_node->object_bindings.insert(obj);
3375}
3376 // For a given object, if cb_node is in that object's cb_bindings, remove cb_node
3377static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
3378    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
3379    if (base_obj) base_obj->cb_bindings.erase(cb_node);
3380}
3381// Reset the command buffer state
3382//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3383static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3384     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, cb);  // avoid operator[], which default-inserts a null entry for unknown handles
3385    if (pCB) {
3386        pCB->in_use.store(0);
3387        pCB->last_cmd = CMD_NONE;
3388        // Reset CB state (note that createInfo is not cleared)
3389        pCB->commandBuffer = cb;
3390        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3391        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3392        pCB->hasDrawCmd = false;
3393        pCB->state = CB_NEW;
3394        pCB->submitCount = 0;
3395        pCB->status = 0;
3396        pCB->viewportMask = 0;
3397        pCB->scissorMask = 0;
3398
3399        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3400            pCB->lastBound[i].reset();
3401        }
3402
3403        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3404        pCB->activeRenderPass = nullptr;
3405        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3406        pCB->activeSubpass = 0;
3407        pCB->broken_bindings.clear();
3408        pCB->waitedEvents.clear();
3409        pCB->events.clear();
3410        pCB->writeEventsBeforeWait.clear();
3411        pCB->waitedEventsBeforeQueryReset.clear();
3412        pCB->queryToStateMap.clear();
3413        pCB->activeQueries.clear();
3414        pCB->startedQueries.clear();
3415        pCB->imageLayoutMap.clear();
3416        pCB->eventToStageMap.clear();
3417        pCB->drawData.clear();
3418        pCB->currentDrawData.buffers.clear();
3419        pCB->vertex_buffer_used = false;
3420        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3421        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3422        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3423            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3424        }
3425        pCB->secondaryCommandBuffers.clear();
3426        pCB->updateImages.clear();
3427        pCB->updateBuffers.clear();
3428        clear_cmd_buf_and_mem_references(dev_data, pCB);
3429        pCB->eventUpdates.clear();
3430        pCB->queryUpdates.clear();
3431
3432        // Remove object bindings
3433        for (auto obj : pCB->object_bindings) {
3434            removeCommandBufferBinding(dev_data, &obj, pCB);
3435        }
3436        pCB->object_bindings.clear();
3437        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3438        for (auto framebuffer : pCB->framebuffers) {
3439            auto fb_state = GetFramebufferState(dev_data, framebuffer);
3440            if (fb_state) fb_state->cb_bindings.erase(pCB);
3441        }
3442        pCB->framebuffers.clear();
3443        pCB->activeFramebuffer = VK_NULL_HANDLE;
3444    }
3445}
3446
3447// Set PSO-related status bits for CB, including dynamic state set via PSO
3448static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
3449    // Account for any dynamic state not set via this PSO
3450    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3451        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
3452        pCB->status |= CBSTATUS_ALL_STATE_SET;
3453    } else {
3454        // First consider all state on
3455        // Then unset any state that's noted as dynamic in PSO
3456        // Finally OR that into CB statemask
3457        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
3458        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3459            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3460                case VK_DYNAMIC_STATE_LINE_WIDTH:
3461                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3462                    break;
3463                case VK_DYNAMIC_STATE_DEPTH_BIAS:
3464                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3465                    break;
3466                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3467                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3468                    break;
3469                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3470                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3471                    break;
3472                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3473                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3474                    break;
3475                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3476                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3477                    break;
3478                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3479                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3480                    break;
3481                default:
3482                    // TODO : Flag error here
3483                    break;
3484            }
3485        }
3486        pCB->status |= psoDynStateMask;
3487    }
3488}
3489
3490// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
3491// render pass.
3492bool insideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3493    bool inside = false;
3494    if (pCB->activeRenderPass) {
3495        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3496                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
3497                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
3498                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
3499    }
3500    return inside;
3501}
3502
3503// Flags validation error if the associated call is made outside a render pass. The apiName
3504// routine should ONLY be called inside a render pass.
3505bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3506    bool outside = false;
3507    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3508        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3509         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3510        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3511                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
3512                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
3513    }
3514    return outside;
3515}
3516
3517static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3518    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3519}
3520
3521static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
3522    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3523        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
3524            instance_data->surfaceExtensionEnabled = true;
3525        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
3526            instance_data->displayExtensionEnabled = true;
3527#ifdef VK_USE_PLATFORM_ANDROID_KHR
3528        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
3529            instance_data->androidSurfaceExtensionEnabled = true;
3530#endif
3531#ifdef VK_USE_PLATFORM_MIR_KHR
3532        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
3533            instance_data->mirSurfaceExtensionEnabled = true;
3534#endif
3535#ifdef VK_USE_PLATFORM_WAYLAND_KHR
3536        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
3537            instance_data->waylandSurfaceExtensionEnabled = true;
3538#endif
3539#ifdef VK_USE_PLATFORM_WIN32_KHR
3540        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
3541            instance_data->win32SurfaceExtensionEnabled = true;
3542#endif
3543#ifdef VK_USE_PLATFORM_XCB_KHR
3544        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
3545            instance_data->xcbSurfaceExtensionEnabled = true;
3546#endif
3547#ifdef VK_USE_PLATFORM_XLIB_KHR
3548        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
3549            instance_data->xlibSurfaceExtensionEnabled = true;
3550#endif
3551    }
3552}
3553
3554// For the given ValidationCheck enum, set all relevant instance disabled flags to true
3555void SetDisabledFlags(instance_layer_data *instance_data, VkValidationFlagsEXT *val_flags_struct) {
3556    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
3557        switch (val_flags_struct->pDisabledValidationChecks[i]) {
3558            case VK_VALIDATION_CHECK_ALL_EXT:
3559                // Set all disabled flags to true
3560                instance_data->disabled.SetAll(true);
3561                break;
3562            default:
3563                break;
3564        }
3565    }
3566}
3567
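     // Hook vkCreateInstance: call down the layer chain first, then initialize instance-level
     // tracking (dispatch table, debug report state, WSI extension flags) and honor any
     // VkValidationFlagsEXT structure found on the pNext chain.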
3568VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
3569                                              VkInstance *pInstance) {
3570    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3571
3572    assert(chain_info->u.pLayerInfo);
3573    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3574    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3575    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
3576
3577    // Advance the link info for the next element on the chain
3578    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3579
3580    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3581    if (result != VK_SUCCESS) return result;
3582
3583    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
3584    instance_data->instance = *pInstance;
3585    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
3586    instance_data->report_data = debug_report_create_instance(
3587        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
3588    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
3589    init_core_validation(instance_data, pAllocator);
3590
3591    ValidateLayerOrdering(*pCreateInfo);
3592    // Parse any pNext chains
3593    if (pCreateInfo->pNext) {
3594        GENERIC_HEADER *struct_header = (GENERIC_HEADER *)pCreateInfo->pNext;
3595        while (struct_header) {
3596            // Check for VkValidationFlagsExt
3597             // Check for VkValidationFlagsEXT
3598                SetDisabledFlags(instance_data, (VkValidationFlagsEXT *)struct_header);
3599            }
3600            struct_header = (GENERIC_HEADER *)struct_header->pNext;
3601        }
3602    }
3603
3604    return result;
3605}
3606
3607// Hook DestroyInstance to remove tableInstanceMap entry
3608VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3609    // TODOSC : Shouldn't need any customization here
3610    dispatch_key key = get_dispatch_key(instance);
3611    // TBD: Need any locking this early, in case this function is called at the
3612    // same time by more than one thread?
3613    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
3614    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
3615
3616    std::lock_guard<std::mutex> lock(global_lock);
3617    // Clean up logging callback, if any
3618    while (instance_data->logging_callback.size() > 0) {
3619        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
3620        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
3621        instance_data->logging_callback.pop_back();
3622    }
3623
3624    layer_debug_report_destroy_instance(instance_data->report_data);
3625     instance_layer_data_map.erase(key);  // instance data was allocated in instance_layer_data_map, so erase from that map
3626}
3627
3628static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, devExts *exts) {
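         // Map each known device extension name to its pointer-to-member enable flag on devExts;
         // every flag is cleared first, then set for each extension the app actually enabled.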
3630    static const std::pair<char const *, bool devExts::*> known_extensions[] {
3631        {VK_KHR_SWAPCHAIN_EXTENSION_NAME, &devExts::khr_swapchain_enabled},
3632        {VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME, &devExts::khr_display_swapchain_enabled},
3633        {VK_NV_GLSL_SHADER_EXTENSION_NAME, &devExts::nv_glsl_shader_enabled},
3634        {VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME, &devExts::khr_descriptor_update_template_enabled},
3635        {VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME, &devExts::khr_shader_draw_parameters_enabled},
3636        {VK_KHR_MAINTENANCE1_EXTENSION_NAME, &devExts::khr_maintenance1_enabled},
3637        {VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME, &devExts::nv_geometry_shader_passthrough_enabled},
3638        {VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME, &devExts::nv_sample_mask_override_coverage_enabled},
3639        {VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, &devExts::nv_viewport_array2_enabled},
3640        {VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, &devExts::khr_subgroup_ballot_enabled},
3641        {VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, &devExts::khr_subgroup_vote_enabled},
3642    };
3643
3644    for (auto ext : known_extensions) {
3645        exts->*(ext.second) = false;
3646    }
3647
3648    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3649        for (auto ext : known_extensions) {
3650            if (!strcmp(ext.first, pCreateInfo->ppEnabledExtensionNames[i])) {
3651                exts->*(ext.second) = true;
3652                break;
3653            }
3654        }
3655    }
3656}
3657
3658// Verify that queue family has been properly requested
3659static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
3660                                                   const VkDeviceCreateInfo *create_info) {
3661    bool skip = false;
3662    auto physical_device_state = GetPhysicalDeviceState(instance_data, gpu);
3663     // First check whether the app has actually requested queueFamilyProperties
3664    if (!physical_device_state) {
3665        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
3666                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
3667                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
3668    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
3669         // TODO: This is not called out as an invalid use in the spec, so make this recommendation more informative.
3670        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
3671                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3672                        "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
3673    } else {
3674        // Check that the requested queue properties are valid
3675        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
3676            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
3677            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
3678                skip |= log_msg(
3679                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3680                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3681                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
3682            } else if (create_info->pQueueCreateInfos[i].queueCount >
3683                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
3684                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3685                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
3686                                DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3687                                "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
3688                                "requested queueCount is %u.",
3689                                requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
3690                                create_info->pQueueCreateInfos[i].queueCount);
3691            }
3692        }
3693    }
3694    return skip;
3695}
3696
3697// Verify that features have been queried and that they are available
3698static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys,
3699                                      const VkPhysicalDeviceFeatures *requested_features) {
3700    bool skip = false;
3701
3702    auto phys_device_state = GetPhysicalDeviceState(dev_data, phys);
3703    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
3704    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
3705    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
3706    //  Need to provide the struct member name with the issue. To do that seems like we'll
3707    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
3708    uint32_t errors = 0;
3709    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
3710    for (uint32_t i = 0; i < total_bools; i++) {
3711        if (requested[i] > actual[i]) {
3712            // TODO: Add index to struct member name helper to be able to include a feature name
3713            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
3714                            0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
3715                            "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
3716                            "which is not available on this device.",
3717                            i);
3718            errors++;
3719        }
3720    }
3721    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
3722        // If user didn't request features, notify them that they should
3723        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
3724        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3725                        __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
3726                        "You requested features that are unavailable on this device. You should first query feature "
3727                        "availability by calling vkGetPhysicalDeviceFeatures().");
3728    }
3729    return skip;
3730}
3731
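     // Hook vkCreateDevice: pre-validate requested features and queue families, call down the
     // chain, then initialize per-device tracking (dispatch table, enabled extensions/features,
     // and cached physical-device properties used by later validation).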
3732VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3733                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3734    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
3735    bool skip = false;
3736
3737    // Check that any requested features are available
3738    if (pCreateInfo->pEnabledFeatures) {
3739        skip |= ValidateRequestedFeatures(instance_data, gpu, pCreateInfo->pEnabledFeatures);
3740    }
3741    skip |= ValidateRequestedQueueFamilyProperties(instance_data, gpu, pCreateInfo);
3742
3743    if (skip) {
3744        return VK_ERROR_VALIDATION_FAILED_EXT;
3745    }
3746
3747    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3748
3749    assert(chain_info->u.pLayerInfo);
3750    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3751    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3752    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
3753    if (fpCreateDevice == NULL) {
3754        return VK_ERROR_INITIALIZATION_FAILED;
3755    }
3756
3757    // Advance the link info for the next element on the chain
3758    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3759
3760    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3761    if (result != VK_SUCCESS) {
3762        return result;
3763    }
3764
3765    std::unique_lock<std::mutex> lock(global_lock);
3766    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3767
3768    device_data->instance_data = instance_data;
3769    // Setup device dispatch table
3770    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
3771    device_data->device = *pDevice;
3772    // Save PhysicalDevice handle
3773    device_data->physical_device = gpu;
3774
3775    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
3776    checkDeviceRegisterExtensions(pCreateInfo, &device_data->device_extensions);
3777    // Get physical device limits for this device
3778    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
3779    uint32_t count;
3780    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
3781    device_data->phys_dev_properties.queue_family_properties.resize(count);
3782    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
3783        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
3784    // TODO: device limits should make sure these are compatible
3785    if (pCreateInfo->pEnabledFeatures) {
3786        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
3787    } else {
3788        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
3789    }
3790    // Store physical device properties and physical device mem limits into device layer_data structs
3791    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
3792    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
3793    lock.unlock();
3794
3795    ValidateLayerOrdering(*pCreateInfo);
3796
3797    return result;
3798}
3799
3800// prototype
3801VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
3802    // TODOSC : Shouldn't need any customization here
3803    bool skip = false;
3804    dispatch_key key = get_dispatch_key(device);
3805    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
3806    // Free all the memory
3807    std::unique_lock<std::mutex> lock(global_lock);
3808    deletePipelines(dev_data);
3809    dev_data->renderPassMap.clear();
3810    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
3811        delete (*ii).second;
3812    }
3813    dev_data->commandBufferMap.clear();
3814    // This will also delete all sets in the pool & remove them from setMap
3815    deletePools(dev_data);
3816    // All sets should be removed
3817    assert(dev_data->setMap.empty());
3818    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
3819        delete del_layout.second;
3820    }
3821    dev_data->descriptorSetLayoutMap.clear();
3822    dev_data->imageViewMap.clear();
3823    dev_data->imageMap.clear();
3824    dev_data->imageSubresourceMap.clear();
3825    dev_data->imageLayoutMap.clear();
3826    dev_data->bufferViewMap.clear();
3827    dev_data->bufferMap.clear();
3828    // Queues persist until device is destroyed
3829    dev_data->queueMap.clear();
3830    // Report any memory leaks
3831    layer_debug_report_destroy_device(device);
3832    lock.unlock();
3833
3834#if DISPATCH_MAP_DEBUG
3835    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
3836#endif
3837    if (!skip) {
3838        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
3839        layer_data_map.erase(key);
3840    }
3841}
3842
3843static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
3844
3845// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
3846//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
3847static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
3848                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
3849    bool skip = false;
3850    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
3851        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
3852                        geo_error_id, "DL",
3853                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
3854                        "device does not have geometryShader feature enabled. %s",
3855                        caller, validation_error_map[geo_error_id]);
3856    }
3857    if (!dev_data->enabled_features.tessellationShader &&
3858        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
3859        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
3860                        tess_error_id, "DL",
3861                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
3862                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
3863                        "does not have tessellationShader feature enabled. %s",
3864                        caller, validation_error_map[tess_error_id]);
3865    }
3866    return skip;
3867}
3868
3869// Loop through bound objects and increment their in_use counts.
3870static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
3871    for (auto obj : cb_node->object_bindings) {
3872        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
3873        if (base_obj) {
3874            base_obj->in_use.fetch_add(1);
3875        }
3876    }
3877}
3878// Track which resources are in-flight by atomically incrementing their "in_use" count
3879static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
3880    cb_node->submitCount++;
3881    cb_node->in_use.fetch_add(1);
3882    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
3883
3884    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
3885    IncrementBoundObjects(dev_data, cb_node);
3886    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
3887    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
3888    //  should then be flagged prior to calling this function
3889    for (auto drawDataElement : cb_node->drawData) {
3890        for (auto buffer : drawDataElement.buffers) {
3891            auto buffer_state = GetBufferState(dev_data, buffer);
3892            if (buffer_state) {
3893                buffer_state->in_use.fetch_add(1);
3894            }
3895        }
3896    }
3897    for (auto event : cb_node->writeEventsBeforeWait) {
3898        auto event_state = GetEventNode(dev_data, event);
3899        if (event_state) event_state->write_in_use++;
3900    }
3901}
3902
3903// Note: This function assumes that the global lock is held by the calling thread.
3904// For the given queue, verify the queue state up to the given seq number.
3905// Currently the only check is to make sure that if there are events to be waited on prior to
3906//  a QueryReset, make sure that all such events have been signalled.
3907static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
3908    bool skip = false;
3909    auto queue_seq = queue->seq;
3910    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
3911    auto sub_it = queue->submissions.begin();
3912    while (queue_seq < seq) {
3913        for (auto &wait : sub_it->waitSemaphores) {
3914            auto &last_seq = other_queue_seqs[wait.queue];
3915            last_seq = std::max(last_seq, wait.seq);
3916        }
3917        for (auto cb : sub_it->cbs) {
3918            auto cb_node = GetCBNode(dev_data, cb);
3919            if (cb_node) {
3920                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
3921                    for (auto event : queryEventsPair.second) {
3922                        if (dev_data->eventMap[event].needsSignaled) {
3923                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3924                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
3925                                            "Cannot get query results on queryPool 0x%" PRIx64
3926                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
3927                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
3928                        }
3929                    }
3930                }
3931            }
3932        }
3933        sub_it++;
3934        queue_seq++;
3935    }
3936    for (auto qs : other_queue_seqs) {
3937        skip |= VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, qs.first), qs.second);
3938    }
3939    return skip;
3940}
3941
3942// When the given fence is retired, verify outstanding queue operations through the point of the fence
3943static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
3944    auto fence_state = GetFenceNode(dev_data, fence);
3945    if (VK_NULL_HANDLE != fence_state->signaler.first) {
3946        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
3947    }
3948    return false;
3949}
3950
3951// TODO: nuke this completely.
3952// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
3953static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
3954     // Decrement the in-flight count; only erase from the global in-flight set once no submission still references this CB
3955    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, cmd_buffer);
3956    pCB->in_use.fetch_sub(1);
3957    if (!pCB->in_use.load()) {
3958        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
3959    }
3960}
3961
3962// Decrement in-use count for objects bound to command buffer
3963static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
3964    BASE_NODE *base_obj = nullptr;
3965    for (auto obj : cb_node->object_bindings) {
3966        base_obj = GetStateStructPtrFromObject(dev_data, obj);
3967        if (base_obj) {
3968            base_obj->in_use.fetch_sub(1);
3969        }
3970    }
3971}
3972
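     // Retire all work on the given queue up through sequence number 'seq': release in_use counts
     // on semaphores, bound objects, and draw buffers; write per-CB query/event state back to the
     // device-level maps; mark each submission's fence retired; then recursively roll forward any
     // queues whose semaphores were waited on.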
3973static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
3974    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
3975
3976    // Roll this queue forward, one submission at a time.
3977    while (pQueue->seq < seq) {
3978        auto &submission = pQueue->submissions.front();
3979
3980        for (auto &wait : submission.waitSemaphores) {
3981            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
3982            if (pSemaphore) {
3983                pSemaphore->in_use.fetch_sub(1);
3984            }
3985            auto &lastSeq = otherQueueSeqs[wait.queue];
3986            lastSeq = std::max(lastSeq, wait.seq);
3987        }
3988
3989        for (auto &semaphore : submission.signalSemaphores) {
3990            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
3991            if (pSemaphore) {
3992                pSemaphore->in_use.fetch_sub(1);
3993            }
3994        }
3995
3996        for (auto cb : submission.cbs) {
3997            auto cb_node = GetCBNode(dev_data, cb);
3998            if (!cb_node) {
3999                continue;
4000            }
4001            // First perform decrement on general case bound objects
4002            DecrementBoundResources(dev_data, cb_node);
4003            for (auto drawDataElement : cb_node->drawData) {
4004                for (auto buffer : drawDataElement.buffers) {
4005                    auto buffer_state = GetBufferState(dev_data, buffer);
4006                    if (buffer_state) {
4007                        buffer_state->in_use.fetch_sub(1);
4008                    }
4009                }
4010            }
4011            for (auto event : cb_node->writeEventsBeforeWait) {
4012                auto eventNode = dev_data->eventMap.find(event);
4013                if (eventNode != dev_data->eventMap.end()) {
4014                    eventNode->second.write_in_use--;
4015                }
4016            }
4017            for (auto queryStatePair : cb_node->queryToStateMap) {
4018                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4019            }
4020            for (auto eventStagePair : cb_node->eventToStageMap) {
4021                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4022            }
4023
4024            removeInFlightCmdBuffer(dev_data, cb);
4025        }
4026
4027        auto pFence = GetFenceNode(dev_data, submission.fence);
4028        if (pFence) {
4029            pFence->state = FENCE_RETIRED;
4030        }
4031
4032        pQueue->submissions.pop_front();
4033        pQueue->seq++;
4034    }
4035
4036    // Roll other queues forward to the highest seq we saw a wait for
4037    for (auto qs : otherQueueSeqs) {
4038        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
4039    }
4040}
4041
4042// Submit a fence to a queue, delimiting previous fences and previous untracked
4043// work by it.
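     // The fence retires once the queue reaches seq = (retired seq) + (already queued submissions)
     // + (submissions in the batch being recorded), which is what signaler.second stores below.
     // For example, with seq == 10, two queued submissions, and a batch of three, it retires at 15.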
4044static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
4045    pFence->state = FENCE_INFLIGHT;
4046    pFence->signaler.first = pQueue->queue;
4047    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4048}
4049
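     // A command buffer that is already in flight (or appears more than once in the current
     // batch) may only be submitted again if it was begun with
     // VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.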
4050static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
4051    bool skip = false;
4052    if ((dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) || current_submit_count > 1) &&
4053        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4054        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4055                        __LINE__, VALIDATION_ERROR_00133, "DS",
4056                        "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
4057                        validation_error_map[VALIDATION_ERROR_00133]);
4058    }
4059    return skip;
4060}
4061
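     // Check general submit-time command buffer state: a ONE_TIME_SUBMIT CB must not be submitted
     // more than once, and the CB must have been fully recorded (vkEndCommandBuffer called) and
     // not invalidated since.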
4062static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
4063                                       int current_submit_count) {
4064    bool skip = false;
4065    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
4066    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4067    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
4068        (cb_state->submitCount + current_submit_count > 1)) {
4069        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4070                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4071                        "Commandbuffer 0x%p was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4072                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4073                        cb_state->commandBuffer, cb_state->submitCount + current_submit_count);
4074    }
4075    // Validate that cmd buffers have been updated
4076    if (CB_RECORDED != cb_state->state) {
4077        if (CB_INVALID == cb_state->state) {
4078            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
4079        } else {  // Flag error for using CB w/o vkEndCommandBuffer() called
4080            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4081                            (uint64_t)(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4082                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!",
4083                            cb_state->commandBuffer, call_source);
4084        }
4085    }
4086    return skip;
4087}
4088
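     // Verify that every buffer referenced by the CB's recorded draw data still exists at submit time.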
4089static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4090    bool skip = false;
4091
4092    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4093    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4094    //  should then be flagged prior to calling this function
4095    for (auto drawDataElement : cb_node->drawData) {
4096        for (auto buffer : drawDataElement.buffers) {
4097            auto buffer_state = GetBufferState(dev_data, buffer);
4098            if (!buffer_state) {
4099                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4100                                (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4101                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4102            }
4103        }
4104    }
4105    return skip;
4106}
4107
4108// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
4109bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
4110                           const uint32_t *indices) {
4111    bool found = false;
4112    bool skip = false;
4113    auto queue_state = GetQueueState(dev_data, queue);
4114    if (queue_state) {
4115        for (uint32_t i = 0; i < count; i++) {
4116            if (indices[i] == queue_state->queueFamilyIndex) {
4117                found = true;
4118                break;
4119            }
4120        }
4121
4122        if (!found) {
4123            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
4124                           object->handle, __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4125                           "vkQueueSubmit: Command buffer 0x%" PRIxLEAST64 " contains %s 0x%" PRIxLEAST64
4126                           " which was not created allowing concurrent access to this queue family %d.",
4127                           reinterpret_cast<uint64_t>(cb_node->commandBuffer), object_string[object->type], object->handle,
4128                           queue_state->queueFamilyIndex);
4129        }
4130    }
4131    return skip;
4132}
4133
4134// Validate that queueFamilyIndices of primary command buffers match this queue
4135// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4136static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4137    bool skip = false;
4138    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4139    auto queue_state = GetQueueState(dev_data, queue);
4140
4141    if (pPool && queue_state) {
4142        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
4143            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4144                            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
4145                            "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
4146                            "0x%p from queue family %d. %s",
4147                            pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
4148                            validation_error_map[VALIDATION_ERROR_00139]);
4149        }
4150
4151        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
4152        for (auto object : pCB->object_bindings) {
4153            if (object.type == kVulkanObjectTypeImage) {
4154                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
4155                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
4156                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
4157                                                  image_state->createInfo.pQueueFamilyIndices);
4158                }
4159            } else if (object.type == kVulkanObjectTypeBuffer) {
4160                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
4161                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
4162                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
4163                                                  buffer_state->createInfo.pQueueFamilyIndices);
4164                }
4165            }
4166        }
4167    }
4168
4169    return skip;
4170}
4171
4172static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
4173    // Track in-use for resources off of primary and any secondary CBs
4174    bool skip = false;
4175
4176    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4177    // on device
4178    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
4179
4180    skip |= validateResources(dev_data, pCB);
4181
4182    if (!pCB->secondaryCommandBuffers.empty()) {
4183        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4184            GLOBAL_CB_NODE *pSubCB = GetCBNode(dev_data, secondaryCmdBuffer);
4185            skip |= validateResources(dev_data, pSubCB);
4186            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4187                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4188                 skip |= log_msg(
4189                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4190                    __LINE__, VALIDATION_ERROR_00135, "DS",
4191                    "Commandbuffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
4192                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
4193                    pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
4194                    validation_error_map[VALIDATION_ERROR_00135]);
4195            }
4196        }
4197    }
4198
4199    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count);
4200
4201    return skip;
4202}
4203
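// Illustrative sketch (not part of the layer): the secondary-buffer check above is satisfied by
// recording a secondary with SIMULTANEOUS_USE when it will be executed from more than one primary.
// Assuming 'secondary_cb' is an allocated secondary command buffer:
//
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
//     begin.pInheritanceInfo = &inherit;  // required for secondary command buffers
//     vkBeginCommandBuffer(secondary_cb, &begin);
//     // ... record; vkCmdExecuteCommands() may now reference it from multiple primaries ...
//     vkEndCommandBuffer(secondary_cb);
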
4204static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
4205    bool skip = false;
4206
4207    if (pFence) {
4208        if (pFence->state == FENCE_INFLIGHT) {
4209            // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
4210            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4211                            (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4212                            "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4213        }
4214
4215        else if (pFence->state == FENCE_RETIRED) {
4216            // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
4217            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4218                            reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4219                            "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted.",
4220                            reinterpret_cast<uint64_t &>(pFence->fence));
4221        }
4222    }
4223
4224    return skip;
4225}
4226
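// Illustrative sketch (not part of the layer): to reuse a fence across submissions without
// triggering either error above, wait for it and reset it between uses:
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // fence is now FENCE_RETIRED here
//     vkResetFences(device, 1, &fence);                         // back to unsignaled
//     vkQueueSubmit(queue, 1, &submit_info, fence);             // valid for submission again
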
4227static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4228                                      VkFence fence) {
4229    auto pQueue = GetQueueState(dev_data, queue);
4230    auto pFence = GetFenceNode(dev_data, fence);
4231
4232    // Mark the fence in-use.
4233    if (pFence) {
4234        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4235    }
4236
4237    // Now process each individual submit
4238    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4239        std::vector<VkCommandBuffer> cbs;
4240        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4241        vector<SEMAPHORE_WAIT> semaphore_waits;
4242        vector<VkSemaphore> semaphore_signals;
4243        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4244            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4245            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
4246            if (pSemaphore) {
4247                if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4248                    semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4249                    pSemaphore->in_use.fetch_add(1);
4250                }
4251                pSemaphore->signaler.first = VK_NULL_HANDLE;
4252                pSemaphore->signaled = false;
4253            }
4254        }
4255        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4256            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4257            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
4258            if (pSemaphore) {
4259                pSemaphore->signaler.first = queue;
4260                pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
4261                pSemaphore->signaled = true;
4262                pSemaphore->in_use.fetch_add(1);
4263                semaphore_signals.push_back(semaphore);
4264            }
4265        }
4266        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4267            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
4268            if (cb_node) {
4269                cbs.push_back(submit->pCommandBuffers[i]);
4270                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
4271                    cbs.push_back(secondaryCmdBuffer);
4272                }
4273                UpdateCmdBufImageLayouts(dev_data, cb_node);
4274                incrementResources(dev_data, cb_node);
4275                if (!cb_node->secondaryCommandBuffers.empty()) {
4276                    for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
4277                        GLOBAL_CB_NODE *pSubCB = GetCBNode(dev_data, secondaryCmdBuffer);
4278                        incrementResources(dev_data, pSubCB);
4279                    }
4280                }
4281            }
4282        }
4283        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
4284                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
4285    }
4286
4287    if (pFence && !submitCount) {
4288        // If no submissions, but just dropping a fence on the end of the queue,
4289        // record an empty submission with just the fence, so we can determine
4290        // its completion.
4291        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
4292                                         fence);
4293    }
4294}
4295
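// Illustrative sketch (not part of the layer): the signaler bookkeeping above supports the common
// submit-chain pattern, where one batch signals a semaphore that a later batch waits on. Assuming
// a created VkSemaphore 'sem':
//
//     VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     VkSubmitInfo first = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     first.signalSemaphoreCount = 1;
//     first.pSignalSemaphores = &sem;   // recorded above as signaler = (queue, seq)
//     VkSubmitInfo second = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     second.waitSemaphoreCount = 1;
//     second.pWaitSemaphores = &sem;    // consumes the pending signal tracked above
//     second.pWaitDstStageMask = &wait_stage;
//     vkQueueSubmit(queue, 1, &first, VK_NULL_HANDLE);
//     vkQueueSubmit(queue, 1, &second, VK_NULL_HANDLE);
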
4296static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4297                                       VkFence fence) {
4298    auto pFence = GetFenceNode(dev_data, fence);
4299    bool skip = ValidateFenceForSubmit(dev_data, pFence);
4300    if (skip) {
4301        return true;
4302    }
4303
4304    unordered_set<VkSemaphore> signaled_semaphores;
4305    unordered_set<VkSemaphore> unsignaled_semaphores;
4306    vector<VkCommandBuffer> current_cmds;
4307    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap = dev_data->imageLayoutMap;
4308    // Now verify each individual submit
4309    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4310        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4311        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4312            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()", VALIDATION_ERROR_00142,
4313                                                 VALIDATION_ERROR_00143);
4314            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4315            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
4316            if (pSemaphore) {
4317                if (unsignaled_semaphores.count(semaphore) ||
4318                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
4319                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4320                                    reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4321                                    "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
4322                                    reinterpret_cast<const uint64_t &>(semaphore));
4323                } else {
4324                    signaled_semaphores.erase(semaphore);
4325                    unsignaled_semaphores.insert(semaphore);
4326                }
4327            }
4328        }
4329        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4330            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4331            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
4332            if (pSemaphore) {
4333                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
4334                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4335                                    reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4336                                    "Queue 0x%p is signaling semaphore 0x%" PRIx64
4337                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4338                                    queue, reinterpret_cast<const uint64_t &>(semaphore),
4339                                    reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
4340                } else {
4341                    unsignaled_semaphores.erase(semaphore);
4342                    signaled_semaphores.insert(semaphore);
4343                }
4344            }
4345        }
4346        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4347            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
4348            if (cb_node) {
4349                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, localImageLayoutMap);
4350                current_cmds.push_back(submit->pCommandBuffers[i]);
4351                skip |= validatePrimaryCommandBufferState(
4352                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
4353                skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
4354
4355                // Potential early exit here as bad object state may crash in delayed function calls
4356                if (skip) {
4357                    return true;
4358                }
4359
4360                // Call submit-time functions to validate/update state
4361                for (auto &function : cb_node->validate_functions) {
4362                    skip |= function();
4363                }
4364                for (auto &function : cb_node->eventUpdates) {
4365                    skip |= function(queue);
4366                }
4367                for (auto &function : cb_node->queryUpdates) {
4368                    skip |= function(queue);
4369                }
4370            }
4371        }
4372    }
4373    return skip;
4374}
4375
4376VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4377    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
4378    std::unique_lock<std::mutex> lock(global_lock);
4379
4380    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
4381    lock.unlock();
4382
4383    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4384
4385    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
4386
4387    lock.lock();
4388    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
4389    lock.unlock();
4390    return result;
4391}
4392
4393static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
4394    bool skip = false;
4395    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
4396        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4397                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
4398                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
4399                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
4400                        validation_error_map[VALIDATION_ERROR_00611]);
4401    }
4402    return skip;
4403}
4404
4405static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
4406    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
4408}
4409
4410VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4411                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4412    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4413    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4414    std::unique_lock<std::mutex> lock(global_lock);
4415    bool skip = PreCallValidateAllocateMemory(dev_data);
4416    if (!skip) {
4417        lock.unlock();
4418        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4419        lock.lock();
4420        if (VK_SUCCESS == result) {
4421            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
4422        }
4423    }
4424    return result;
4425}
4426
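// Illustrative sketch (not part of the layer): applications can query the allocation-count limit
// enforced above, and typically sub-allocate many resources from a few large VkDeviceMemory
// blocks instead of calling vkAllocateMemory per resource:
//
//     VkPhysicalDeviceProperties props;
//     vkGetPhysicalDeviceProperties(physical_device, &props);
//     uint32_t max_allocs = props.limits.maxMemoryAllocationCount;  // at least 4096 per the spec
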
4427// For given obj node, if it is in use, flag a validation error and return callback result, else return false
4428bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
4429                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
4430    if (dev_data->instance_data->disabled.object_in_use) return false;
4431    bool skip = false;
4432    if (obj_node->in_use.load()) {
4433        skip |=
4434            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
4435                    __LINE__, error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
4436                    object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
4437    }
4438    return skip;
4439}
4440
4441static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
4442    *mem_info = GetMemObjInfo(dev_data, mem);
4443    *obj_struct = {reinterpret_cast<uint64_t &>(mem), kVulkanObjectTypeDeviceMemory};
4444    if (dev_data->instance_data->disabled.free_memory) return false;
4445    bool skip = false;
4446    if (*mem_info) {
4447        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
4448    }
4449    return skip;
4450}
4451
4452static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
4453    // Clear mem binding for any bound objects
4454    for (auto obj : mem_info->obj_bindings) {
4455        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
4456                MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
4457                obj.handle, (uint64_t)mem_info->mem);
4458        switch (obj.type) {
4459            case kVulkanObjectTypeImage: {
4460                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4461                assert(image_state);  // Any destroyed images should already be removed from bindings
4462                image_state->binding.mem = MEMORY_UNBOUND;
4463                break;
4464            }
4465            case kVulkanObjectTypeBuffer: {
4466                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4467                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
4468                buffer_state->binding.mem = MEMORY_UNBOUND;
4469                break;
4470            }
4471            default:
4472                // Should only have buffer or image objects bound to memory
4473                assert(0);
4474        }
4475    }
4476    // Any bound cmd buffers are now invalid
4477    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
4478    dev_data->memObjMap.erase(mem);
4479}
4480
4481VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4482    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4483    DEVICE_MEM_INFO *mem_info = nullptr;
4484    VK_OBJECT obj_struct;
4485    std::unique_lock<std::mutex> lock(global_lock);
4486    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
4487    if (!skip) {
4488        lock.unlock();
4489        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
4490        lock.lock();
4491        if (mem != VK_NULL_HANDLE) {
4492            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
4493        }
4494    }
4495}
4496
4497// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
4498//  and that the size of the map range should be:
4499//  1. Not zero
4500//  2. Within the size of the memory allocation
4501static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4502    bool skip = false;
4503
4504    if (size == 0) {
4505        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4506                       (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4507                       "VkMapMemory: Attempting to map memory range of size zero");
4508    }
4509
4510    auto mem_element = dev_data->memObjMap.find(mem);
4511    if (mem_element != dev_data->memObjMap.end()) {
4512        auto mem_info = mem_element->second.get();
4513        // It is an application error to call VkMapMemory on an object that is already mapped
4514        if (mem_info->mem_range.size != 0) {
4515            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4516                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4517                           "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4518        }
4519
4520        // Validate that offset + size is within object's allocationSize
4521        if (size == VK_WHOLE_SIZE) {
4522            if (offset >= mem_info->alloc_info.allocationSize) {
4523                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4524                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4525                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
4526                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
4527                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
4528            }
4529        } else {
4530            if ((offset + size) > mem_info->alloc_info.allocationSize) {
4531                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4532                               (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
4533                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s",
4534                               offset, size + offset, mem_info->alloc_info.allocationSize,
4535                               validation_error_map[VALIDATION_ERROR_00628]);
4536            }
4537        }
4538    }
4539    return skip;
4540}
4541
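// Illustrative sketch (not part of the layer): a map request satisfying every check above --
// nonzero size (or VK_WHOLE_SIZE), not already mapped, and offset + size within allocationSize.
// Assuming 'mem' is host-visible and 'src'/'copy_size' describe the data to upload:
//
//     void *data = nullptr;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &data);  // offset 0 < allocationSize
//     memcpy(data, src, static_cast<size_t>(copy_size));
//     vkUnmapMemory(device, mem);
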
4542static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4543    auto mem_info = GetMemObjInfo(dev_data, mem);
4544    if (mem_info) {
4545        mem_info->mem_range.offset = offset;
4546        mem_info->mem_range.size = size;
4547    }
4548}
4549
4550static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
4551    bool skip = false;
4552    auto mem_info = GetMemObjInfo(dev_data, mem);
4553    if (mem_info) {
4554        if (!mem_info->mem_range.size) {
4555            // Valid Usage: memory must currently be mapped
4556            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4557                           (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
4558                           "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
4559                           validation_error_map[VALIDATION_ERROR_00649]);
4560        }
4561        mem_info->mem_range.size = 0;
4562        if (mem_info->shadow_copy) {
4563            free(mem_info->shadow_copy_base);
4564            mem_info->shadow_copy_base = 0;
4565            mem_info->shadow_copy = 0;
4566        }
4567    }
4568    return skip;
4569}
4570
4571// Guard value for pad data
4572static char NoncoherentMemoryFillValue = 0xb;
4573
4574static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
4575                                     void **ppData) {
4576    auto mem_info = GetMemObjInfo(dev_data, mem);
4577    if (mem_info) {
4578        mem_info->p_driver_data = *ppData;
4579        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
4580        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4581            mem_info->shadow_copy = 0;
4582        } else {
4583            if (size == VK_WHOLE_SIZE) {
4584                size = mem_info->alloc_info.allocationSize - offset;
4585            }
4586            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
4587            assert(SafeModulo(mem_info->shadow_pad_size,
4588                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
4589            // Ensure start of mapped region reflects hardware alignment constraints
4590            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
4591
4592            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
4593            uint64_t start_offset = offset % map_alignment;
4594            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
4595            mem_info->shadow_copy_base =
4596                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
4597
4598            mem_info->shadow_copy =
4599                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
4600                                         ~(map_alignment - 1)) +
4601                start_offset;
4602            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
4603                                  map_alignment) == 0);
4604
4605            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
4606            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
4607        }
4608    }
4609}
4610
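// Worked example of the shadow-copy layout above (values assumed for illustration): with
// minMemoryMapAlignment = 64, offset = 80, and size = 256, start_offset = 80 % 64 = 16 and
// shadow_pad_size = 64. malloc receives 2*64 + 256 + 64 + 16 = 464 bytes; the base pointer is
// rounded up to the next 64-byte boundary, then advanced by start_offset, so (*ppData - offset)
// stays minMemoryMapAlignment-aligned as the spec requires. The 64-byte guardbands on either
// side of the user range are filled with NoncoherentMemoryFillValue and later checked to detect
// out-of-bounds writes through non-coherent mappings.
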
4611// Verify that the state of a fence being waited on is appropriate. That is,
4612//  the fence should have been submitted on a queue or during acquire next image;
4613//  waiting on a fence that was never submitted can never complete, so warn on it.
4614static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
4615    bool skip = false;
4616
4617    auto pFence = GetFenceNode(dev_data, fence);
4618    if (pFence) {
4619        if (pFence->state == FENCE_UNSIGNALED) {
4620            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4621                            reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4622                            "%s called for fence 0x%" PRIxLEAST64
4623                            " which has not been submitted on a Queue or during "
4624                            "acquire next image.",
4625                            apiCall, reinterpret_cast<uint64_t &>(fence));
4626        }
4627    }
4628    return skip;
4629}
4630
4631static void RetireFence(layer_data *dev_data, VkFence fence) {
4632    auto pFence = GetFenceNode(dev_data, fence);
4633    if (pFence->signaler.first != VK_NULL_HANDLE) {
4634        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
4635        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
4636    } else {
4637        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
4638        // the fence as retired.
4639        pFence->state = FENCE_RETIRED;
4640    }
4641}
4642
4643static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
4644    if (dev_data->instance_data->disabled.wait_for_fences) return false;
4645    bool skip = false;
4646    for (uint32_t i = 0; i < fence_count; i++) {
4647        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
4648        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
4649    }
4650    return skip;
4651}
4652
4653static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
4654    // When we know that all fences are complete we can clean/remove their CBs
4655    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
4656        for (uint32_t i = 0; i < fence_count; i++) {
4657            RetireFence(dev_data, fences[i]);
4658        }
4659    }
4660    // NOTE : Alternate case not handled here is when some fences have completed. In
4661    // NOTE : The alternate case, not handled here, is when only some fences have completed. In
4662    //  that case, for the app to guarantee which fences completed, it will have to call
4663    //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
4664
4665VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
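// Illustrative sketch (not part of the layer): the waitAll distinction above mirrors the API --
// only a waitAll == VK_TRUE wait (or a single-fence wait) proves that every fence completed:
//
//     VkFence fences[2] = {fence_a, fence_b};
//     vkWaitForFences(device, 2, fences, VK_TRUE, UINT64_MAX);   // all retired; CBs can be cleaned
//     vkWaitForFences(device, 2, fences, VK_FALSE, UINT64_MAX);  // only *some* fence signaled;
//                                                                // use vkGetFenceStatus() per fence
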
4666                                             uint64_t timeout) {
4667    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4668    // Verify fence status of submitted fences
4669    std::unique_lock<std::mutex> lock(global_lock);
4670    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
4671    lock.unlock();
4672    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4673
4674    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4675
4676    if (result == VK_SUCCESS) {
4677        lock.lock();
4678        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
4679        lock.unlock();
4680    }
4681    return result;
4682}
4683
4684static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
4685    if (dev_data->instance_data->disabled.get_fence_state) return false;
4686    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
4687}
4688
4689static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
4690
4691VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
4692    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4693    std::unique_lock<std::mutex> lock(global_lock);
4694    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
4695    lock.unlock();
4696    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4697
4698    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
4699    if (result == VK_SUCCESS) {
4700        lock.lock();
4701        PostCallRecordGetFenceStatus(dev_data, fence);
4702        lock.unlock();
4703    }
4704    return result;
4705}
4706
4707static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
4708    // Add queue to tracking set only if it is new
4709    auto result = dev_data->queues.emplace(queue);
4710    if (result.second) {
4711        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
4712        queue_state->queue = queue;
4713        queue_state->queueFamilyIndex = q_family_index;
4714        queue_state->seq = 0;
4715    }
4716}
4717
4718VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
4719    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4720    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4721    std::lock_guard<std::mutex> lock(global_lock);
4722
4723    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
4724}
4725
4726static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
4727    *queue_state = GetQueueState(dev_data, queue);
4728    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
4729    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
4730}
4731
4732static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
4733    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
4734}
4735
4736VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
4737    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
4738    QUEUE_STATE *queue_state = nullptr;
4739    std::unique_lock<std::mutex> lock(global_lock);
4740    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
4741    lock.unlock();
4742    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4743    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
4744    if (VK_SUCCESS == result) {
4745        lock.lock();
4746        PostCallRecordQueueWaitIdle(dev_data, queue_state);
4747        lock.unlock();
4748    }
4749    return result;
4750}
4751
4752static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
4753    if (dev_data->instance_data->disabled.device_wait_idle) return false;
4754    bool skip = false;
4755    for (auto &queue : dev_data->queueMap) {
4756        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
4757    }
4758    return skip;
4759}
4760
4761static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
4762    for (auto &queue : dev_data->queueMap) {
4763        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
4764    }
4765}
4766
4767VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
4768    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4769    std::unique_lock<std::mutex> lock(global_lock);
4770    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
4771    lock.unlock();
4772    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4773    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
4774    if (VK_SUCCESS == result) {
4775        lock.lock();
4776        PostCallRecordDeviceWaitIdle(dev_data);
4777        lock.unlock();
4778    }
4779    return result;
4780}
4781
4782static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
4783    *fence_node = GetFenceNode(dev_data, fence);
4784    *obj_struct = {reinterpret_cast<uint64_t &>(fence), kVulkanObjectTypeFence};
4785    if (dev_data->instance_data->disabled.destroy_fence) return false;
4786    bool skip = false;
4787    if (*fence_node) {
4788        if ((*fence_node)->state == FENCE_INFLIGHT) {
4789            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4790                            (uint64_t)(fence), __LINE__, VALIDATION_ERROR_00173, "DS", "Fence 0x%" PRIx64 " is in use. %s",
4791                            (uint64_t)(fence), validation_error_map[VALIDATION_ERROR_00173]);
4792        }
4793    }
4794    return skip;
4795}
4796
4797static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
4798
4799VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
4800    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4801    // Common data objects used pre & post call
4802    FENCE_NODE *fence_node = nullptr;
4803    VK_OBJECT obj_struct;
4804    std::unique_lock<std::mutex> lock(global_lock);
4805    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
4806
4807    if (!skip) {
4808        lock.unlock();
4809        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
4810        lock.lock();
4811        PostCallRecordDestroyFence(dev_data, fence);
4812    }
4813}
4814
4815static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
4816                                            VK_OBJECT *obj_struct) {
4817    *sema_node = GetSemaphoreNode(dev_data, semaphore);
4818    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), kVulkanObjectTypeSemaphore};
4819    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
4820    bool skip = false;
4821    if (*sema_node) {
4822        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
4823    }
4824    return skip;
4825}
4826
4827static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
4828
4829VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
4830    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4831    SEMAPHORE_NODE *sema_node;
4832    VK_OBJECT obj_struct;
4833    std::unique_lock<std::mutex> lock(global_lock);
4834    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
4835    if (!skip) {
4836        lock.unlock();
4837        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
4838        lock.lock();
4839        PostCallRecordDestroySemaphore(dev_data, semaphore);
4840    }
4841}
4842
4843static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
4844    *event_state = GetEventNode(dev_data, event);
4845    *obj_struct = {reinterpret_cast<uint64_t &>(event), kVulkanObjectTypeEvent};
4846    if (dev_data->instance_data->disabled.destroy_event) return false;
4847    bool skip = false;
4848    if (*event_state) {
4849        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
4850    }
4851    return skip;
4852}
4853
4854static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
4855    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
4856    dev_data->eventMap.erase(event);
4857}
4858
4859VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4860    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4861    EVENT_STATE *event_state = nullptr;
4862    VK_OBJECT obj_struct;
4863    std::unique_lock<std::mutex> lock(global_lock);
4864    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
4865    if (!skip) {
4866        lock.unlock();
4867        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
4868        lock.lock();
4869        if (event != VK_NULL_HANDLE) {
4870            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
4871        }
4872    }
4873}
4874
4875static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
4876                                            VK_OBJECT *obj_struct) {
4877    *qp_state = GetQueryPoolNode(dev_data, query_pool);
4878    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), kVulkanObjectTypeQueryPool};
4879    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
4880    bool skip = false;
4881    if (*qp_state) {
4882        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
4883    }
4884    return skip;
4885}
4886
4887static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
4888                                           VK_OBJECT obj_struct) {
4889    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
4890    dev_data->queryPoolMap.erase(query_pool);
4891}
4892
4893VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
4894    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4895    QUERY_POOL_NODE *qp_state = nullptr;
4896    VK_OBJECT obj_struct;
4897    std::unique_lock<std::mutex> lock(global_lock);
4898    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
4899    if (!skip) {
4900        lock.unlock();
4901        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
4902        lock.lock();
4903        if (queryPool != VK_NULL_HANDLE) {
4904            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
4905        }
4906    }
4907}

4908static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
4909                                               uint32_t query_count, VkQueryResultFlags flags,
4910                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
4911    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
4912        auto cb = GetCBNode(dev_data, cmd_buffer);
4913        for (auto query_state_pair : cb->queryToStateMap) {
4914            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
4915        }
4916    }
4917    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
4918    bool skip = false;
4919    for (uint32_t i = 0; i < query_count; ++i) {
4920        QueryObject query = {query_pool, first_query + i};
4921        auto qif_pair = queries_in_flight->find(query);
4922        auto query_state_pair = dev_data->queryToStateMap.find(query);
4923        if (query_state_pair != dev_data->queryToStateMap.end()) {
4924            // Available and in flight
4925            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
4927                for (auto cmd_buffer : qif_pair->second) {
4928                    auto cb = GetCBNode(dev_data, cmd_buffer);
4929                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
4930                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
4931                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4932                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4933                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
4934                                        (uint64_t)(query_pool), first_query + i);
4935                    }
4936                }
4937                // Unavailable and in flight
4938            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
4940                // TODO : Can there be the same query in use by multiple command buffers in flight?
4941                bool make_available = false;
4942                for (auto cmd_buffer : qif_pair->second) {
4943                    auto cb = GetCBNode(dev_data, cmd_buffer);
4944                    make_available |= cb->queryToStateMap[query];
4945                }
4946                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
4947                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4948                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4949                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
4950                                    (uint64_t)(query_pool), first_query + i);
4951                }
4952                // Unavailable
4953            } else if (!query_state_pair->second) {
4954                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
4955                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4956                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
4957                                (uint64_t)(query_pool), first_query + i);
4958            }
4959        } else {
4960            // Uninitialized -- this query has never been recorded, so no data exists for this index
4961            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
4962                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4963                            "Cannot get query results on queryPool 0x%" PRIx64
4964                            " with index %d as data has not been collected for this index.",
4965                            (uint64_t)(query_pool), first_query + i);
4966        }
4967    }
4968    return skip;
4969}
4970
4971static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
4972                                              uint32_t query_count,
4973                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
4974    for (uint32_t i = 0; i < query_count; ++i) {
4975        QueryObject query = {query_pool, first_query + i};
4976        auto qif_pair = queries_in_flight->find(query);
4977        auto query_state_pair = dev_data->queryToStateMap.find(query);
4978        if (query_state_pair != dev_data->queryToStateMap.end()) {
4979            // Available and in flight
4980            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
4982                for (auto cmd_buffer : qif_pair->second) {
4983                    auto cb = GetCBNode(dev_data, cmd_buffer);
4984                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
4985                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
4986                        for (auto event : query_event_pair->second) {
4987                            dev_data->eventMap[event].needsSignaled = true;
4988                        }
4989                    }
4990                }
4991            }
4992        }
4993    }
4994}
4995
4996VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
4997                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
4998    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4999    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
5000    std::unique_lock<std::mutex> lock(global_lock);
5001    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
5002    lock.unlock();
5003    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5004    VkResult result =
5005        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
5006    lock.lock();
5007    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
5008    lock.unlock();
5009    return result;
5010}
5011
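// Illustrative sketch (not part of the layer): requesting results in a way that avoids the
// "in flight"/"unavailable" errors validated above by letting the driver wait for availability.
// Assuming QUERY_COUNT queries were written to 'query_pool':
//
//     uint64_t results[QUERY_COUNT];
//     vkGetQueryPoolResults(device, query_pool, 0, QUERY_COUNT, sizeof(results), results,
//                           sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
//
// VK_QUERY_RESULT_PARTIAL_BIT similarly permits reading queries that are still in flight,
// returning partial values instead of flagging them unavailable.
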
5012// Return true if given ranges intersect, else false
5013// Prereq : For both ranges, range->end - range->start > 0. A zero-sized range should already have
5014//  resulted in an error, so it is not re-checked here.
5015// When one range is linear and the other is non-linear, both are padded out to bufferImageGranularity
5016//  before comparison. In that padded case, an encountered alias is reported as a warning and *skip may
5017//  be set by the callback, so callers that can hit the padded case should merge in the *skip value.
5018// The aliasing report can be suppressed by passing skip_checks=true, for call sites outside the validation path.
5019static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
5020                            bool skip_checks) {
5021    *skip = false;
5022    auto r1_start = range1->start;
5023    auto r1_end = range1->end;
5024    auto r2_start = range2->start;
5025    auto r2_end = range2->end;
5026    VkDeviceSize pad_align = 1;
5027    if (range1->linear != range2->linear) {
5028        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5029    }
5030    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
5031    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
5032
5033    if (!skip_checks && (range1->linear != range2->linear)) {
5034        // In linear vs. non-linear case, warn of aliasing
5035        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5036        const char *r1_type_str = range1->image ? "image" : "buffer";
5037        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5038        const char *r2_type_str = range2->image ? "image" : "buffer";
5039        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5040        *skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
5041                         MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5042                                                           " which may indicate a bug. For further info refer to the "
5043                                                           "Buffer-Image Granularity section of the Vulkan specification. "
5044                                                           "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
5045                                                           "xhtml/vkspec.html#resources-bufferimagegranularity)",
5046                         r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5047    }
5048    // Ranges intersect
5049    return true;
5050}
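// Worked example of the padding above (granularity value assumed): with bufferImageGranularity =
// 0x10000, a linear buffer at [0x0, 0x8fff] and a non-linear image at [0x9000, 0x18fff] round to
// the same granularity page at their facing edges (0x8fff & ~0xffff == 0x0 and
// 0x9000 & ~0xffff == 0x0), so the ranges are treated as intersecting and the aliasing warning
// fires. Placing the image at offset 0x10000 or above moves it to a different page, and the
// comparison reports no intersection.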
5051// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5052bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5053    // Create a local MEMORY_RANGE struct to wrap offset/size
5054    MEMORY_RANGE range_wrap;
5055    // Synch linear with range1 to avoid padding and potential validation error case
5056    range_wrap.linear = range1->linear;
5057    range_wrap.start = offset;
5058    range_wrap.end = end;
5059    bool tmp_bool;
5060    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
5061}
5062// For given mem_info, set all ranges valid that intersect [offset-end] range
5063// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5064static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5065    bool tmp_bool = false;
5066    MEMORY_RANGE map_range = {};
5067    map_range.linear = true;
5068    map_range.start = offset;
5069    map_range.end = end;
5070    for (auto &handle_range_pair : mem_info->bound_ranges) {
5071        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
5072            // TODO : WARN here if tmp_bool true?
5073            handle_range_pair.second.valid = true;
5074        }
5075    }
5076}
5077
5078static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
5079                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
5080                                      bool is_linear, const char *api_name) {
5081    bool skip = false;
5082
5083    MEMORY_RANGE range;
5084    range.image = is_image;
5085    range.handle = handle;
5086    range.linear = is_linear;
5087    range.valid = mem_info->global_valid;
5088    range.memory = mem_info->mem;
5089    range.start = memoryOffset;
5090    range.size = memRequirements.size;
5091    range.end = memoryOffset + memRequirements.size - 1;
5092    range.aliases.clear();
5093
5094    // Check for aliasing problems.
5095    for (auto &obj_range_pair : mem_info->bound_ranges) {
5096        auto check_range = &obj_range_pair.second;
5097        bool intersection_error = false;
5098        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
5099            skip |= intersection_error;
5100            range.aliases.insert(check_range);
5101        }
5102    }
5103
5104    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
5105        UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_00805 : VALIDATION_ERROR_00793;
5106        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5107                       reinterpret_cast<uint64_t &>(mem_info->mem), __LINE__, error_code, "MEM",
5108                       "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
5109                       "), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
5110                       api_name, reinterpret_cast<uint64_t &>(mem_info->mem), handle, memoryOffset,
5111                       mem_info->alloc_info.allocationSize, validation_error_map[error_code]);
5112    }
5113
5114    return skip;
5115}
5116
5117// Object with given handle is being bound to memory w/ given mem_info struct.
5118//  Track the newly bound memory range with given memoryOffset
5119//  Also scan any previous ranges, track aliased ranges with the new range, and update the alias
5120//  sets on both sides. Overlap errors for linear vs. non-linear ranges are flagged separately by
5121//  ValidateInsertMemoryRange(), so this record-only function returns nothing.
5122// is_image indicates an image object, otherwise handle is for a buffer
5123// is_linear indicates a buffer or linear image
5124static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5125                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5126    MEMORY_RANGE range;
5127
5128    range.image = is_image;
5129    range.handle = handle;
5130    range.linear = is_linear;
5131    range.valid = mem_info->global_valid;
5132    range.memory = mem_info->mem;
5133    range.start = memoryOffset;
5134    range.size = memRequirements.size;
5135    range.end = memoryOffset + memRequirements.size - 1;
5136    range.aliases.clear();
5137    // Update Memory aliasing
5138    // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
5139    // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
5140    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5141    for (auto &obj_range_pair : mem_info->bound_ranges) {
5142        auto check_range = &obj_range_pair.second;
5143        bool intersection_error = false;
5144        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
5145            range.aliases.insert(check_range);
5146            tmp_alias_ranges.insert(check_range);
5147        }
5148    }
5149    mem_info->bound_ranges[handle] = std::move(range);
5150    for (auto tmp_range : tmp_alias_ranges) {
5151        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5152    }
5153    if (is_image)
5154        mem_info->bound_images.insert(handle);
5155    else
5156        mem_info->bound_buffers.insert(handle);
5157}
5158
5159static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
5160                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
5161                                           const char *api_name) {
5162    return ValidateInsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear,
5163                                     api_name);
5164}
5165static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5166                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5167    InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5168}
5169
5170static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
5171                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
5172    return ValidateInsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true,
5173                                     api_name);
5174}
5175static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5176                                    VkMemoryRequirements mem_reqs) {
5177    InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5178}
5179
5180// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
5181//  is_image indicates if handle is for image or buffer
5182//  This function will also remove the handle from the appropriate bound_images or
5183//  bound_buffers set and clean up any aliases for the range being removed.
5184static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5185    auto erase_range = &mem_info->bound_ranges[handle];
5186    for (auto alias_range : erase_range->aliases) {
5187        alias_range->aliases.erase(erase_range);
5188    }
5189    erase_range->aliases.clear();
5190    mem_info->bound_ranges.erase(handle);
5191    if (is_image) {
5192        mem_info->bound_images.erase(handle);
5193    } else {
5194        mem_info->bound_buffers.erase(handle);
5195    }
5196}
5197
5198void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5199
5200void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5201
5202VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5203    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5204    BUFFER_STATE *buffer_state = nullptr;
5205    VK_OBJECT obj_struct;
5206    std::unique_lock<std::mutex> lock(global_lock);
5207    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
5208    if (!skip) {
5209        lock.unlock();
5210        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
5211        lock.lock();
5212        if (buffer != VK_NULL_HANDLE) {
5213            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
5214        }
5215    }
5216}
5217
5218VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5219    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5220    // Common data objects used pre & post call
5221    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
5222    VK_OBJECT obj_struct;
5223    std::unique_lock<std::mutex> lock(global_lock);
5224    // Validate state before calling down chain, update common data if we'll be calling down chain
5225    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
5226    if (!skip) {
5227        lock.unlock();
5228        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
5229        lock.lock();
5230        if (bufferView != VK_NULL_HANDLE) {
5231            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
5232        }
5233    }
5234}
5235
5236VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5237    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5238    IMAGE_STATE *image_state = nullptr;
5239    VK_OBJECT obj_struct;
5240    std::unique_lock<std::mutex> lock(global_lock);
5241    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
5242    if (!skip) {
5243        lock.unlock();
5244        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
5245        lock.lock();
5246        if (image != VK_NULL_HANDLE) {
5247            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
5248        }
5249    }
5250}
5251
5252static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5253                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
5254    bool skip = false;
5255    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5256        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5257                       reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
5258                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5259                       "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
5260                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
5261                       reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
5262    }
5263    return skip;
5264}
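// Illustrative sketch (application side, not part of the layer): the check above passes when the
// allocation's memoryTypeIndex is one of the bits set in VkMemoryRequirements::memoryTypeBits:
//
//   VkMemoryRequirements reqs;
//   vkGetBufferMemoryRequirements(device, buffer, &reqs);
//   uint32_t type_index = 0;
//   while (((reqs.memoryTypeBits >> type_index) & 1) == 0) ++type_index;  // first compatible type
//   // Using type_index as VkMemoryAllocateInfo::memoryTypeIndex satisfies ValidateMemoryTypes()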
5265
5266static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
5267                                            VkDeviceSize memoryOffset) {
5268    bool skip = false;
5269    if (buffer_state) {
5270        std::unique_lock<std::mutex> lock(global_lock);
5271        // Track objects tied to memory
5272        uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5273        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
5274        if (!buffer_state->memory_requirements_checked) {
5275            // The spec does not explicitly require calling vkGetBufferMemoryRequirements() before vkBindBufferMemory(),
5276            // but it is implied: the memory being bound must conform to the VkMemoryRequirements returned by
5277            // vkGetBufferMemoryRequirements()
5278            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5279                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
5280                            "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
5281                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
5282                            buffer_handle);
5283            // Make the call for them so we can verify the state
5284            lock.unlock();
5285            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
5286            lock.lock();
5287        }
5288
5289        // Validate bound memory range information
5290        auto mem_info = GetMemObjInfo(dev_data, mem);
5291        if (mem_info) {
5292            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements,
5293                                                    "vkBindBufferMemory()");
5294            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
5295                                        VALIDATION_ERROR_00797);
5296        }
5297
5298        // Validate memory requirements alignment
5299        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
5300            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5301                            buffer_handle, __LINE__, VALIDATION_ERROR_02174, "DS",
5302                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
5303                            " but must be an integer multiple of the "
5304                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5305                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
5306                            memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
5307        }
5308
5309        // Validate memory requirements size (mem_info may be null if the memory handle was invalid)
5310        if (mem_info && (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset))) {
5311            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5312                            buffer_handle, __LINE__, VALIDATION_ERROR_02175, "DS",
5313                            "vkBindBufferMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
5314                            " but must be at least as large as "
5315                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
5316                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
5317                            mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
5318                            validation_error_map[VALIDATION_ERROR_02175]);
5319        }
5320
5321        // Validate device limits alignments
5322        static const VkBufferUsageFlagBits usage_list[3] = {
5323            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5324            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5325        static const char *memory_type[3] = {"texel", "uniform", "storage"};
5326        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
5327                                             "minStorageBufferOffsetAlignment"};
5328
5329        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
5330        // clang-format off
5331        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_00794, VALIDATION_ERROR_00795,
5332            VALIDATION_ERROR_00796 };
5333        // clang-format on
5334
5335        // Not static: these alignment limits are per-device, so read them from dev_data on every call
5336        const VkDeviceSize offset_requirement[3] = {
5337            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5338            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5339            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
5340        VkBufferUsageFlags usage = buffer_state->createInfo.usage;  // buffer_state is known non-null in this branch
5341
5342        for (int i = 0; i < 3; i++) {
5343            if (usage & usage_list[i]) {
5344                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
5345                    skip |= log_msg(
5346                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
5347                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
5348                                                    " but must be a multiple of "
5349                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
5350                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
5351                }
5352            }
5353        }
5354    }
5355    return skip;
5356}
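// Illustrative sketch (application side, not part of the layer; desired_offset, mem, and alloc_info
// are hypothetical application values) of a bind that satisfies the checks above -- requirements
// queried first, offset aligned, size within the allocation:
//
//   VkMemoryRequirements reqs;
//   vkGetBufferMemoryRequirements(device, buffer, &reqs);  // avoids the warning above
//   VkDeviceSize offset = ((desired_offset + reqs.alignment - 1) / reqs.alignment) * reqs.alignment;
//   assert(reqs.size <= alloc_info.allocationSize - offset);
//   vkBindBufferMemory(device, buffer, mem, offset);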
5357
5358static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
5359                                           VkDeviceSize memoryOffset) {
5360    if (buffer_state) {
5361        std::unique_lock<std::mutex> lock(global_lock);
5362        // Track bound memory range information
5363        auto mem_info = GetMemObjInfo(dev_data, mem);
5364        if (mem_info) {
5365            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
5366        }
5367
5368        // Track objects tied to memory
5369        uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5370        SetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
5371
5372        buffer_state->binding.mem = mem;
5373        buffer_state->binding.offset = memoryOffset;
5374        buffer_state->binding.size = buffer_state->requirements.size;
5375    }
5376}
5377
5378VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5379    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5380    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5381    auto buffer_state = GetBufferState(dev_data, buffer);
5382    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
5383    if (!skip) {
5384        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
5385        if (result == VK_SUCCESS) {
5386            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
5387        }
5388    }
5389    return result;
5390}
5391
5392VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
5393                                                       VkMemoryRequirements *pMemoryRequirements) {
5394    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5395    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5396    auto buffer_state = GetBufferState(dev_data, buffer);
5397    if (buffer_state) {
5398        buffer_state->requirements = *pMemoryRequirements;
5399        buffer_state->memory_requirements_checked = true;
5400    }
5401}
5402
5403VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5404    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5405    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
5406    auto image_state = GetImageState(dev_data, image);
5407    if (image_state) {
5408        image_state->requirements = *pMemoryRequirements;
5409        image_state->memory_requirements_checked = true;
5410    }
5411}
5412
5413VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5414    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5415    // Common data objects used pre & post call
5416    IMAGE_VIEW_STATE *image_view_state = nullptr;
5417    VK_OBJECT obj_struct;
5418    std::unique_lock<std::mutex> lock(global_lock);
5419    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
5420    if (!skip) {
5421        lock.unlock();
5422        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
5423        lock.lock();
5424        if (imageView != VK_NULL_HANDLE) {
5425            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
5426        }
5427    }
5428}
5429
5430VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
5431                                               const VkAllocationCallbacks *pAllocator) {
5432    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5433
5434    std::unique_lock<std::mutex> lock(global_lock);
5435    dev_data->shaderModuleMap.erase(shaderModule);
5436    lock.unlock();
5437
5438    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
5439}
5440
5441static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
5442                                           VK_OBJECT *obj_struct) {
5443    *pipeline_state = getPipelineState(dev_data, pipeline);
5444    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), kVulkanObjectTypePipeline};
5445    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
5446    bool skip = false;
5447    if (*pipeline_state) {
5448        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
5449    }
5450    return skip;
5451}
5452
5453static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
5454                                          VK_OBJECT obj_struct) {
5455    // Any bound cmd buffers are now invalid
5456    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
5457    dev_data->pipelineMap.erase(pipeline);
5458}
5459
5460VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5461    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5462    PIPELINE_STATE *pipeline_state = nullptr;
5463    VK_OBJECT obj_struct;
5464    std::unique_lock<std::mutex> lock(global_lock);
5465    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
5466    if (!skip) {
5467        lock.unlock();
5468        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
5469        lock.lock();
5470        if (pipeline != VK_NULL_HANDLE) {
5471            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
5472        }
5473    }
5474}
5475
5476VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
5477                                                 const VkAllocationCallbacks *pAllocator) {
5478    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5479    std::unique_lock<std::mutex> lock(global_lock);
5480    dev_data->pipelineLayoutMap.erase(pipelineLayout);
5481    lock.unlock();
5482
5483    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5484}
5485
5486static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
5487                                          VK_OBJECT *obj_struct) {
5488    *sampler_state = GetSamplerState(dev_data, sampler);
5489    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), kVulkanObjectTypeSampler};
5490    if (dev_data->instance_data->disabled.destroy_sampler) return false;
5491    bool skip = false;
5492    if (*sampler_state) {
5493        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
5494    }
5495    return skip;
5496}
5497
5498static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
5499                                         VK_OBJECT obj_struct) {
5500    // Any bound cmd buffers are now invalid
5501    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
5502    dev_data->samplerMap.erase(sampler);
5503}
5504
5505VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5506    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5507    SAMPLER_STATE *sampler_state = nullptr;
5508    VK_OBJECT obj_struct;
5509    std::unique_lock<std::mutex> lock(global_lock);
5510    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
5511    if (!skip) {
5512        lock.unlock();
5513        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
5514        lock.lock();
5515        if (sampler != VK_NULL_HANDLE) {
5516            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
5517        }
5518    }
5519}
5520
5521static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
5522    dev_data->descriptorSetLayoutMap.erase(ds_layout);
5523}
5524
5525VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
5526                                                      const VkAllocationCallbacks *pAllocator) {
5527    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5528    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5529    std::unique_lock<std::mutex> lock(global_lock);
5530    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
5531}
5532
5533static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
5534                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
5535    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
5536    *obj_struct = {reinterpret_cast<uint64_t &>(pool), kVulkanObjectTypeDescriptorPool};
5537    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
5538    bool skip = false;
5539    if (*desc_pool_state) {
5540        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
5541    }
5542    return skip;
5543}
5544
5545static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
5546                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
5547    // Any bound cmd buffers are now invalid
5548    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
5549    // Free sets that were in this pool
5550    for (auto ds : desc_pool_state->sets) {
5551        freeDescriptorSet(dev_data, ds);
5552    }
5553    dev_data->descriptorPoolMap.erase(descriptorPool);
5554}
5555
5556VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
5557                                                 const VkAllocationCallbacks *pAllocator) {
5558    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5559    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
5560    VK_OBJECT obj_struct;
5561    std::unique_lock<std::mutex> lock(global_lock);
5562    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
5563    if (!skip) {
5564        lock.unlock();
5565        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
5566        lock.lock();
5567        if (descriptorPool != VK_NULL_HANDLE) {
5568            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
5569        }
5570    }
5571}
5572// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
5573//  If this is a secondary command buffer, then make sure its primary is also in-flight
5574//  If primary is not in-flight, then remove secondary from global in-flight set
5575// This function is only valid at a point when cmdBuffer is being reset or freed
5576static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
5577                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
5578    bool skip = false;
5579    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5580        // Primary CB or secondary where primary is also in-flight is an error
5581        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5582            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5583            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5584                            reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
5585                            "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
5586                            validation_error_map[error_code]);
5587        }
5588    }
5589    return skip;
5590}
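// Illustrative failure case (application side, not part of the layer; hypothetical handles):
//
//   vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
//   vkFreeCommandBuffers(device, pool, 1, &cmd_buf);  // trips the check above: cmd_buf is in-flight
//
// Waiting first (vkQueueWaitIdle, or a fence covering the submit) retires the command buffer from
// the in-flight set and makes the free legal.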
5591
5592// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5593static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
5594                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
5595    bool skip = false;
5596    for (auto cmd_buffer : pPool->commandBuffers) {
5597        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5598            skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
5599        }
5600    }
5601    return skip;
5602}
5603
5604static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5605    for (auto cmd_buffer : pPool->commandBuffers) {
5606        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5607    }
5608}
5609
5610VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
5611                                              const VkCommandBuffer *pCommandBuffers) {
5612    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5613    bool skip = false;
5614    std::unique_lock<std::mutex> lock(global_lock);
5615
5616    for (uint32_t i = 0; i < commandBufferCount; i++) {
5617        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
5618        // Verify that the command buffer is not in-flight before any are freed
5619        if (cb_node) {
5620            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
5621        }
5622    }
5623
5624    if (skip) return;
5625
5626    auto pPool = GetCommandPoolNode(dev_data, commandPool);
5627    for (uint32_t i = 0; i < commandBufferCount; i++) {
5628        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
5629        // Delete CB information structure, and remove from commandBufferMap
5630        if (cb_node) {
5631            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5632            // reset prior to delete for data clean-up
5633            resetCB(dev_data, cb_node->commandBuffer);
5634            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
5635            delete cb_node;
5636        }
5637
5638        // Remove commandBuffer reference from commandPoolMap
5639        pPool->commandBuffers.remove(pCommandBuffers[i]);
5640    }
5641    lock.unlock();
5642
5643    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5644}
5645
5646VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5647                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
5648    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5649
5650    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5651
5652    if (VK_SUCCESS == result) {
5653        std::lock_guard<std::mutex> lock(global_lock);
5654        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5655        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5656    }
5657    return result;
5658}
5659
5660VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5661                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5662    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5663    bool skip = false;
5664    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
5665        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
5666            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
5667                            __LINE__, VALIDATION_ERROR_01006, "DS",
5668                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
5669                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
5670                            validation_error_map[VALIDATION_ERROR_01006]);
5671        }
5672    }
5673
5674    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5675    if (!skip) {
5676        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5677    }
5678    if (result == VK_SUCCESS) {
5679        std::lock_guard<std::mutex> lock(global_lock);
5680        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
5681        qp_node->createInfo = *pCreateInfo;
5682    }
5683    return result;
5684}
5685
5686static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
5687    *cp_state = GetCommandPoolNode(dev_data, pool);
5688    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
5689    bool skip = false;
5690    if (*cp_state) {
5691        // Verify that command buffers in pool are complete (not in-flight)
5692        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
5693    }
5694    return skip;
5695}
5696
5697static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
5698    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
5699    clearCommandBuffersInFlight(dev_data, cp_state);
5700    for (auto cb : cp_state->commandBuffers) {
5701        auto cb_node = GetCBNode(dev_data, cb);
5702        clear_cmd_buf_and_mem_references(dev_data, cb_node);
5703        // Remove references to this cb_node prior to delete
5704        // TODO : Need better solution here, resetCB?
5705        for (auto obj : cb_node->object_bindings) {
5706            removeCommandBufferBinding(dev_data, &obj, cb_node);
5707        }
5708        for (auto framebuffer : cb_node->framebuffers) {
5709            auto fb_state = GetFramebufferState(dev_data, framebuffer);
5710            if (fb_state) fb_state->cb_bindings.erase(cb_node);
5711        }
5712        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
5713        delete cb_node;                        // delete CB info structure
5714    }
5715    dev_data->commandPoolMap.erase(pool);
5716}
5717
5718// Destroy commandPool along with all of the commandBuffers allocated from that pool
5719VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5720    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5721    COMMAND_POOL_NODE *cp_state = nullptr;
5722    std::unique_lock<std::mutex> lock(global_lock);
5723    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
5724    if (!skip) {
5725        lock.unlock();
5726        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
5727        lock.lock();
5728        if (commandPool != VK_NULL_HANDLE) {
5729            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
5730        }
5731    }
5732}
5733
5734VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5735    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5736    bool skip = false;
5737
5738    std::unique_lock<std::mutex> lock(global_lock);
5739    auto pPool = GetCommandPoolNode(dev_data, commandPool);
5740    skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
5741    lock.unlock();
5742
5743    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5744
5745    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
5746
5747    // Reset all of the CBs allocated from this pool
5748    if (VK_SUCCESS == result) {
5749        lock.lock();
5750        clearCommandBuffersInFlight(dev_data, pPool);
5751        for (auto cmdBuffer : pPool->commandBuffers) {
5752            resetCB(dev_data, cmdBuffer);
5753        }
5754        lock.unlock();
5755    }
5756    return result;
5757}
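// Illustrative sketch (application side, not part of the layer): every command buffer allocated
// from the pool must have completed execution before the pool is reset:
//
//   vkQueueWaitIdle(queue);               // or wait on fences covering each prior submit
//   vkResetCommandPool(device, pool, 0);  // now passes checkCommandBuffersInFlight() above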
5758
5759VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5760    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5761    bool skip = false;
5762    std::unique_lock<std::mutex> lock(global_lock);
5763    for (uint32_t i = 0; i < fenceCount; ++i) {
5764        auto pFence = GetFenceNode(dev_data, pFences[i]);
5765        if (pFence && pFence->state == FENCE_INFLIGHT) {
5766            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5767                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
5768                            "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
5769                            validation_error_map[VALIDATION_ERROR_00183]);
5770        }
5771    }
5772    lock.unlock();
5773
5774    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5775
5776    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
5777
5778    if (result == VK_SUCCESS) {
5779        lock.lock();
5780        for (uint32_t i = 0; i < fenceCount; ++i) {
5781            auto pFence = GetFenceNode(dev_data, pFences[i]);
5782            if (pFence) {
5783                pFence->state = FENCE_UNSIGNALED;
5784            }
5785        }
5786        lock.unlock();
5787    }
5788
5789    return result;
5790}
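// Illustrative sketch (application side, not part of the layer): a fence must leave the in-flight
// state before vkResetFences() is legal (the layer retires fences elsewhere once a wait or status
// query observes completion):
//
//   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//   vkResetFences(device, 1, &fence);  // recorded above as FENCE_UNSIGNALED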
5791
5792// For given cb_nodes, invalidate them and track object causing invalidation
5793void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
5794    for (auto cb_node : cb_nodes) {
5795        if (cb_node->state == CB_RECORDING) {
5796            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5797                    (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5798                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
5799        }
5800        cb_node->state = CB_INVALID;
5801        cb_node->broken_bindings.push_back(obj);
5802    }
5803}
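// Illustrative consequence (application side, not part of the layer; fb and cmd_buf hypothetical):
//
//   // record cmd_buf with a render pass instance that references framebuffer fb, then:
//   vkEndCommandBuffer(cmd_buf);
//   vkDestroyFramebuffer(device, fb, nullptr);  // invalidateCommandBuffers() marks cmd_buf CB_INVALID
//
// A later submit of cmd_buf can then be reported against the broken binding recorded here.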
5804
5805static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
5806                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
5807    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
5808    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), kVulkanObjectTypeFramebuffer};
5809    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
5810    bool skip = false;
5811    if (*framebuffer_state) {
5812        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
5813    }
5814    return skip;
5815}
5816
5817static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
5818                                             VK_OBJECT obj_struct) {
5819    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
5820    dev_data->frameBufferMap.erase(framebuffer);
5821}
5822
5823VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5824    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5825    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
5826    VK_OBJECT obj_struct;
5827    std::unique_lock<std::mutex> lock(global_lock);
5828    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
5829    if (!skip) {
5830        lock.unlock();
5831        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
5832        lock.lock();
5833        if (framebuffer != VK_NULL_HANDLE) {
5834            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
5835        }
5836    }
5837}
5838
5839static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
5840                                             VK_OBJECT *obj_struct) {
5841    *rp_state = GetRenderPassState(dev_data, render_pass);
5842    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), kVulkanObjectTypeRenderPass};
5843    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
5844    bool skip = false;
5845    if (*rp_state) {
5846        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
5847    }
5848    return skip;
5849}
5850
5851static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
5852                                            VK_OBJECT obj_struct) {
5853    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
5854    dev_data->renderPassMap.erase(render_pass);
5855}
5856
5857VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5858    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5859    RENDER_PASS_STATE *rp_state = nullptr;
5860    VK_OBJECT obj_struct;
5861    std::unique_lock<std::mutex> lock(global_lock);
5862    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
5863    if (!skip) {
5864        lock.unlock();
5865        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
5866        lock.lock();
5867        if (renderPass != VK_NULL_HANDLE) {
5868            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
5869        }
5870    }
5871}
5872
5873VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5874                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5875    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5876    std::unique_lock<std::mutex> lock(global_lock);
5877    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
5878    lock.unlock();
5879
5880    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5881    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5882
5883    if (VK_SUCCESS == result) {
5884        lock.lock();
5885        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
5886        lock.unlock();
5887    }
5888    return result;
5889}
5890
5891VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5892                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5893    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5894    std::unique_lock<std::mutex> lock(global_lock);
5895    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
5896    lock.unlock();
5897    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5898    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
5899    if (VK_SUCCESS == result) {
5900        lock.lock();
5901        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
5902        lock.unlock();
5903    }
5904    return result;
5905}
5906
5907// Access helper functions for external modules
5908const VkFormatProperties *GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
5909    VkFormatProperties *format_properties = new VkFormatProperties;
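    // NOTE: heap-allocated with no matching delete in this layer; ownership of the returned struct
    // (and the obligation to free it) passes to the caller of this accessor.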
5910    instance_layer_data *instance_data =
5911        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
5912    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, format_properties);
5913    return format_properties;
5914}
5915
5916const VkImageFormatProperties *GetImageFormatProperties(core_validation::layer_data *device_data, VkFormat format,
5917                                                        VkImageType image_type, VkImageTiling tiling, VkImageUsageFlags usage,
5918                                                        VkImageCreateFlags flags) {
5919    VkImageFormatProperties *image_format_properties = new VkImageFormatProperties;
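    // NOTE: heap-allocated like GetFormatProperties() above; the caller owns and must free the
    // returned struct.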
5920    instance_layer_data *instance_data =
5921        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
5922    instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, format, image_type, tiling,
5923                                                                         usage, flags, image_format_properties);
5924    return image_format_properties;
5925}
5926
5927const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
5928
5929const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
5930    return &device_data->phys_dev_props;
5931}
5932
5933const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
5934
5935std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
5936    return &device_data->imageMap;
5937}
5938
5939std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
5940    return &device_data->imageSubresourceMap;
5941}
5942
5943std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
5944    return &device_data->imageLayoutMap;
5945}
5946
5947std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
5948    return &device_data->imageLayoutMap;
5949}
5950
5951std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
5952    return &device_data->bufferMap;
5953}
5954
5955std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
5956    return &device_data->bufferViewMap;
5957}
5958
5959std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
5960    return &device_data->imageViewMap;
5961}
5962
5963const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) {
5964    return &device_data->phys_dev_properties;
5965}
5966
5967const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) {
5968    return &device_data->enabled_features;
5969}
5970
5971const devExts *GetDeviceExtensions(const layer_data *device_data) { return &device_data->device_extensions; }
5972
5973VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5974                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5975    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5976    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5977    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
5978    if (!skip) {
5979        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
5980    }
5981    if (VK_SUCCESS == result) {
5982        std::lock_guard<std::mutex> lock(global_lock);
5983        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
5984    }
5985    return result;
5986}
5987
5988VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
5989                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
5990    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5991    std::unique_lock<std::mutex> lock(global_lock);
5992    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
5993    lock.unlock();
5994    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5995    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
5996    if (VK_SUCCESS == result) {
5997        lock.lock();
5998        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
5999        lock.unlock();
6000    }
6001
6002    return result;
6003}
6004
6005VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
6006                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6007    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6008    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6009    if (VK_SUCCESS == result) {
6010        std::lock_guard<std::mutex> lock(global_lock);
6011        auto &fence_node = dev_data->fenceMap[*pFence];
6012        fence_node.fence = *pFence;
6013        fence_node.createInfo = *pCreateInfo;
6014        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6015    }
6016    return result;
6017}
6018
6019// TODO handle pipeline caches
6020VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6021                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6022    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6023    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6024    return result;
6025}
6026
6027VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
6028                                                const VkAllocationCallbacks *pAllocator) {
6029    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6030    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6031}
6032
6033VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
6034                                                    void *pData) {
6035    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6036    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6037    return result;
6038}
6039
6040VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
6041                                                   const VkPipelineCache *pSrcCaches) {
6042    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6043    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6044    return result;
6045}
6046
6047// utility function to set collective state for pipeline
6048void set_pipeline_state(PIPELINE_STATE *pPipe) {
6049    // If any enabled attachment blend uses a constant blend factor, flag the pipeline as requiring blend constants
6050    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6051        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6052            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6053                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6054                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6055                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6056                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6057                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6058                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6059                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6060                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6061                    pPipe->blendConstantsEnabled = true;
6062                }
6063            }
6064        }
6065    }
6066}
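// Illustrative sketch (not part of the layer): a blend state like the following causes
// set_pipeline_state() to set blendConstantsEnabled, which later draw-time validation can use to
// require that blend constants actually be supplied (e.g., via vkCmdSetBlendConstants()):
//
//   VkPipelineColorBlendAttachmentState att = {};
//   att.blendEnable = VK_TRUE;
//   att.dstColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;  // within the range tested above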
6067
6068bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
6069    bool skip = false;
6070    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
6071        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
6072            if (!device_data->enabled_features.dualSrcBlend) {
                // Dual-source factors may appear in any of the four blend factor fields (color or alpha)
                const auto &att = pipe_state->attachments[i];
                const auto is_dual_src_factor = [](VkBlendFactor factor) {
                    return (factor == VK_BLEND_FACTOR_SRC1_COLOR) || (factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                           (factor == VK_BLEND_FACTOR_SRC1_ALPHA) || (factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);
                };
                if (is_dual_src_factor(att.srcColorBlendFactor) || is_dual_src_factor(att.dstColorBlendFactor) ||
                    is_dual_src_factor(att.srcAlphaBlendFactor) || is_dual_src_factor(att.dstAlphaBlendFactor)) {
6081                    skip |=
6082                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6083                                reinterpret_cast<uint64_t &>(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
6084                                "CmdBindPipeline: vkPipeline (0x%" PRIxLEAST64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
6085                                "] has a dual-source blend factor but this device feature is not enabled.",
6086                                reinterpret_cast<uint64_t &>(pipe_state->pipeline), i);
6087                }
6088            }
6089        }
6090    }
6091    return skip;
6092}
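// Illustrative sketch (application side, not part of the layer): SRC1 blend factors are only legal
// when the dualSrcBlend feature was requested at device creation:
//
//   VkPhysicalDeviceFeatures supported = {};
//   vkGetPhysicalDeviceFeatures(gpu, &supported);
//   VkPhysicalDeviceFeatures enabled = {};
//   enabled.dualSrcBlend = supported.dualSrcBlend;  // request only if supported
//   // then point VkDeviceCreateInfo::pEnabledFeatures at &enabled before vkCreateDevice()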
6093
6094static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
6095                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
6096    bool skip = false;
6097    instance_layer_data *instance_data =
6098        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
6099
6100    for (uint32_t i = 0; i < count; i++) {
6101        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
6102        if (create_infos[i].pVertexInputState != NULL) {
6103            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
6104                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
6105                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
6106                VkFormatProperties properties;
6107                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
6108                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
6109                    skip |= log_msg(
6110                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
6111                        __LINE__, VALIDATION_ERROR_01413, "IMAGE",
6112                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
6113                        "(%s) is not a supported vertex buffer format. %s",
6114                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
6115                }
6116            }
6117        }
6118    }
6119    return skip;
6120}
6121
6122VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6123                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
6124                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
6125    // TODO What to do with pipelineCache?
6126    // The order of operations here is a little convoluted but gets the job done
6127    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
6128    //  2. Create state is then validated (which uses flags setup during shadowing)
6129    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6130    bool skip = false;
6131    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6132    vector<PIPELINE_STATE *> pipe_state(count);
6133    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6134
6135    uint32_t i = 0;
6136    std::unique_lock<std::mutex> lock(global_lock);
6137
6138    for (i = 0; i < count; i++) {
6139        pipe_state[i] = new PIPELINE_STATE;
6140        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
6141        pipe_state[i]->render_pass_ci.initialize(GetRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6142        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6143    }
6144    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);
6145
6146    if (skip) {
6147        for (i = 0; i < count; i++) {
6148            delete pipe_state[i];
6149            pPipelines[i] = VK_NULL_HANDLE;
6150        }
6151        return VK_ERROR_VALIDATION_FAILED_EXT;
6152    }
6153
6154    lock.unlock();
6155    auto result =
6156        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6157    lock.lock();
6158    for (i = 0; i < count; i++) {
6159        if (pPipelines[i] == VK_NULL_HANDLE) {
6160            delete pipe_state[i];
6161        } else {
6162            pipe_state[i]->pipeline = pPipelines[i];
6163            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
6164        }
6165    }
6166
6167    return result;
6168}
6169
6170VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6171                                                      const VkComputePipelineCreateInfo *pCreateInfos,
6172                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
6173    bool skip = false;
6174
6175    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6176    vector<PIPELINE_STATE *> pPipeState(count);
6177    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6178
6179    uint32_t i = 0;
6180    std::unique_lock<std::mutex> lock(global_lock);
6181    for (i = 0; i < count; i++) {
6182        // TODO: Verify compute stage bits
6183
6184        // Create and initialize internal tracking data structure
6185        pPipeState[i] = new PIPELINE_STATE;
6186        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
6187        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6188
6189        // TODO: Add Compute Pipeline Verification
6190        skip |= !validate_compute_pipeline(dev_data, pPipeState[i]);
6191        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
6192    }
6193
6194    if (skip) {
6195        for (i = 0; i < count; i++) {
6196            // Clean up any locally allocated data structures
6197            delete pPipeState[i];
6198            pPipelines[i] = VK_NULL_HANDLE;
6199        }
6200        return VK_ERROR_VALIDATION_FAILED_EXT;
6201    }
6202
    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pPipeState[i];
        } else {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
    }
    return result;
}

static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
}

static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    // TODO: Convert this to unique_ptr to avoid leaks
    dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
        }
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size doesn't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
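    // Worked example: with maxPushConstantsSize = 128, a range with offset = 120 and size = 16 fails
    // because 16 > 128 - 120; testing (offset + size > max) directly could wrap for a size near
    // UINT32_MAX and incorrectly pass.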
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00877, "DS",
                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00880, "DS",
                            "%s call has push constants index %u with offset %u and size %u that "
                            "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00991, "DS",
                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00992, "DS",
                            "%s call has push constants index %u with offset %u and size %u that "
                            "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
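    // (size & 0x3) tests the low two bits, equivalent to (size % 4 != 0).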
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00878, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be greater than zero. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00879, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be a multiple of 4. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_01000, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be greater than zero. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00990, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be a multiple of 4. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_02521, "DS",
                            "%s call has push constants index %u with "
                            "offset %u. Offset must be a multiple of 4. %s",
                            caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00989, "DS",
                            "%s call has push constants with "
                            "offset %u. Offset must be a multiple of 4. %s",
                            caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // TODO : Add checks for VALIDATION_ERRORS 865-870
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00882, "DS",
                            "vkCreatePipelineLayout() call has no stageFlags set for push constant range %u. %s", i,
                            validation_error_map[VALIDATION_ERROR_00882]);
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
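    // Each pair of ranges is tested with a bitwise AND of their stageFlags; any shared bit (e.g. two
    // ranges that both include VK_SHADER_STAGE_VERTEX_BIT) counts as a duplicate.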
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00871, "DS",
                                "vkCreatePipelineLayout(): Duplicate stage flags found in ranges %u and %u. %s", i, j,
                                validation_error_map[VALIDATION_ERROR_00871]);
            }
        }
    }

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
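        // Note: with the default allocator, operator new throws std::bad_alloc rather than returning
        // NULL on failure, so the check below is defensive only.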
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO: Determine whether any state needs to be cleaned up if pool creation fails
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_00928
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // Always update common data
    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
}
// Allocation state was good and the call down the chain was made, so update state to reflect the allocated descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
    bool skip = false;
    // First make sure sets being destroyed are not currently in use
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
        }
    }

    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                        validation_error_map[VALIDATION_ERROR_00922]);
    }
    return skip;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            freeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a proof-of-concept for the core validation architecture
//  Really we'll want to break these functions out into separate files, but
//  they're kept together here to prove out the design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // First thing to do is perform map look-ups.
    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    //  so we can't do a single map look-up up-front; the look-ups happen individually in the functions called below

    // Now make the call(s) that validate state, but don't perform state updates in this function
    // Note that there is no single DescriptorSet instance to operate on here, so a helper function in the
    //  cvdescriptorset namespace parses the params and makes calls into the specific class instances
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // The only map look-up at the top level is for the device-level layer_data
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                    pDescriptorCopies);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, there is nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    addCommandBufferBinding(&fb_state->cb_bindings,
                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
                            cb_state);
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
    // The framebuffer has a single render pass child, so bind it once rather than once per attachment
    auto rp_state = GetRenderPassState(dev_data, fb_state->createInfo.renderPass);
    if (rp_state) {
        addCommandBufferBinding(
            &rp_state->cb_bindings,
            {reinterpret_cast<uint64_t &>(rp_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // Begin implicitly resets the command buffer, so make sure any fence guarding it has completed
        // and then clear its memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "MEM",
                            "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
                            "You must check command buffer fence before this call. %s",
                            commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
                            validation_error_map[VALIDATION_ERROR_00106]);
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    // Object_tracker makes sure these objects are valid
                    assert(pInfo->renderPass);
                    assert(pInfo->framebuffer);
                    string errorString = "";
                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                    if (framebuffer) {
                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                             GetRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                             errorString)) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command "
                                            "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
                                            ") is incompatible w/ framebuffer "
                                            "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
                                            commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                            reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                            reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str(),
                                            validation_error_map[VALIDATION_ERROR_00112]);
                        }
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                    __LINE__, VALIDATION_ERROR_00107, "DS",
                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                    "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device does not "
                                    "support precise occlusion queries. %s",
                                    commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                        VALIDATION_ERROR_00111, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
                                        "that is less than the number of subpasses (%d). %s",
                                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
                                        validation_error_map[VALIDATION_ERROR_00111]);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
                            commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If this is a secondary command buffer that continues a render pass, record the state it inherits
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    }
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
        }
        skip |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        UpdateCmdBufferLastCmd(pCB, CMD_END);
        for (auto query : pCB->activeQueries) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00124, "DS",
                            "Ending command buffer with an in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
                            (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
        }
    }
    if (!skip) {
        lock.unlock();
        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        if (VK_SUCCESS == result) {
            pCB->state = CB_RECORDED;
        }
        return result;
    } else {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        VkCommandPool cmdPool = pCB->createInfo.commandPool;
        auto pPool = GetCommandPoolNode(dev_data, cmdPool);
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
                            "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
        }
        skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
    }
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_00603);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        UpdateCmdBufferLastCmd(cb_state, CMD_BINDPIPELINE);
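        // Dispatches are disallowed inside a render pass, so flag a compute bind point used while one is active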
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
        }
        // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596

        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
            addCommandBufferBinding(&pipe_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), kVulkanObjectTypePipeline}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = GetRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(
                        &rp_state->cb_bindings,
                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
                            validation_error_map[VALIDATION_ERROR_00600]);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01446);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETVIEWPORTSTATE);
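        // Mark one bit per updated viewport: e.g. firstViewport = 2, viewportCount = 3 sets bits 2..4
        // (0b11100); the scissor mask in CmdSetScissor() uses the same encoding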
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01495);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETSCISSORSTATE);
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01480);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETLINEWIDTHSTATE);
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
                            "vkCmdSetLineWidth() called, but the bound pipeline was created without the "
                            "VK_DYNAMIC_STATE_LINE_WIDTH flag. This is undefined behavior and the value may be ignored. %s",
                            validation_error_map[VALIDATION_ERROR_01476]);
        } else {
            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, kVulkanObjectTypeCommandBuffer,
                                    reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01485);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_01482, "DS",
                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
                            "parameter must be set to 0.0. %s",
                            validation_error_map[VALIDATION_ERROR_01482]);
        }
        if (!skip) {
            UpdateCmdBufferLastCmd(pCB, CMD_SETDEPTHBIASSTATE);
            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01553);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETBLENDSTATE);
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01509);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETDEPTHBOUNDSSTATE);
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01519);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILREADMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01525);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILWRITEMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01531);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILREFERENCESTATE);
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_00985);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
        // Track total count of dynamic descriptor types to make sure we have an offset for each one
        uint32_t total_dynamic_descriptors = 0;
        string error_string = "";
        uint32_t last_set_index = firstSet + setCount - 1;
        if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
            cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
            cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
        }
        auto old_final_bound_set = cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index];
        auto pipeline_layout = getPipelineLayout(dev_data, layout);
        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
            cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(dev_data, pDescriptorSets[set_idx]);
            if (descriptor_set) {
                cb_state->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx], __LINE__,
                                DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
                                (uint64_t)pDescriptorSets[set_idx], string_VkPipelineBindPoint(pipelineBindPoint));
                if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx], __LINE__,
                                    DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                    "Descriptor Set 0x%" PRIxLEAST64
                                    " bound but it was never updated. You may want to either update it or not bind it.",
                                    (uint64_t)pDescriptorSets[set_idx]);
                }
                // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx], __LINE__,
                                    VALIDATION_ERROR_00974, "DS",
                                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                    "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
                                    set_idx, set_idx + firstSet, reinterpret_cast<uint64_t &>(layout), error_string.c_str(),
                                    validation_error_map[VALIDATION_ERROR_00974]);
                }

                auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();

                cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();

                if (set_dynamic_descriptor_count) {
7086                    // First make sure we won't overstep bounds of pDynamicOffsets array
7087                    if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
7088                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7089                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
7090                                        __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7091                                        "descriptorSet #%u (0x%" PRIxLEAST64
7092                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7093                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7094                                        set_idx, (uint64_t)pDescriptorSets[set_idx], descriptor_set->GetDynamicDescriptorCount(),
7095                                        (dynamicOffsetCount - total_dynamic_descriptors));
7096                    } else {  // Validate and store dynamic offsets with the set
7097                        // Validate Dynamic Offset Minimums
7098                        uint32_t cur_dyn_offset = total_dynamic_descriptors;
7099                        for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
7100                            if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7101                                if (SafeModulo(
7102                                        pDynamicOffsets[cur_dyn_offset],
7103                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7104                                    skip |= log_msg(
7105                                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7106                                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978, "DS",
7107                                        "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7108                                        "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
7109                                        cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7110                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
7111                                        validation_error_map[VALIDATION_ERROR_00978]);
7112                                }
7113                                cur_dyn_offset++;
7114                            } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7115                                if (SafeModulo(
7116                                        pDynamicOffsets[cur_dyn_offset],
7117                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7118                                    skip |= log_msg(
7119                                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7120                                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978, "DS",
7121                                        "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7122                                        "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
7123                                        cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7124                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
7125                                        validation_error_map[VALIDATION_ERROR_00978]);
7126                                }
7127                                cur_dyn_offset++;
7128                            }
7129                        }
7130
7131                        cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
7132                            std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
7133                                                  pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
7134                        // Keep running total of dynamic descriptor count to verify at the end
7135                        total_dynamic_descriptors += set_dynamic_descriptor_count;
7136                    }
7137                }
7138            } else {
7139                skip |= log_msg(
7140                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7141                    (uint64_t)pDescriptorSets[set_idx], __LINE__, DRAWSTATE_INVALID_SET, "DS",
7142                    "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)pDescriptorSets[set_idx]);
7143            }
7144            UpdateCmdBufferLastCmd(cb_state, CMD_BINDDESCRIPTORSETS);
7145            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7146            if (firstSet > 0) {  // Check set #s below the first bound set
7147                for (uint32_t i = 0; i < firstSet; ++i) {
7148                    if (cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7149                        !verify_set_layout_compatibility(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7150                                                         pipeline_layout, i, error_string)) {
7151                        skip |= log_msg(
7152                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7153                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7154                            (uint64_t)cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7155                            "DescriptorSet 0x%" PRIxLEAST64
7156                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7157                            (uint64_t)cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7158                        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7159                    }
7160                }
7161            }
7162            // Check if the newly-bound final set invalidates any remaining bound sets
7163            if ((cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (last_set_index)) {
7164                if (old_final_bound_set &&
7165                    !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
7166                    auto old_set = old_final_bound_set->GetSet();
7167                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7168                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7169                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7170                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7171                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7172                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7173                                    reinterpret_cast<uint64_t &>(old_set), last_set_index,
7174                                    (uint64_t)cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index],
7175                                    last_set_index, last_set_index + 1, (uint64_t)layout);
7176                    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
7177                }
7178            }
7179        }
7180        //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7181        if (total_dynamic_descriptors != dynamicOffsetCount) {
7182            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7183                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
7184                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7185                            "is %u. It should exactly match the number of dynamic descriptors. %s",
7186                            setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
7187        }
7188    }
7189    lock.unlock();
7190    if (!skip)
7191        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7192                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7193}
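
// Illustrative sketch (hypothetical helper, not part of the layer build): a call that
// satisfies the checks above -- exactly one entry in pDynamicOffsets per dynamic
// descriptor in the bound sets (VALIDATION_ERROR_00975), each offset aligned to the
// matching device limit (VALIDATION_ERROR_00978).
#if 0
static void ExampleBindWithDynamicOffset(VkCommandBuffer cmd, VkPipelineLayout pipe_layout, VkDescriptorSet set,
                                         const VkPhysicalDeviceLimits &limits) {
    // Assumes 'set' contains exactly one UNIFORM_BUFFER_DYNAMIC descriptor
    uint32_t dynamic_offset = 256;  // 256 is a multiple of any legal minUniformBufferOffsetAlignment
    assert((dynamic_offset % limits.minUniformBufferOffsetAlignment) == 0);
    vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout, 0 /*firstSet*/, 1, &set,
                            1, &dynamic_offset);
}
#endif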
7194
7195VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
7196                                              VkIndexType indexType) {
7197    bool skip = false;
7198    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7199    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7200    std::unique_lock<std::mutex> lock(global_lock);
7201
7202    auto buffer_state = GetBufferState(dev_data, buffer);
7203    auto cb_node = GetCBNode(dev_data, commandBuffer);
7204    if (cb_node && buffer_state) {
7205        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01357);
7206        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7207        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
7208        std::function<bool()> function = [=]() {
7209            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
7210        };
7211        cb_node->validate_functions.push_back(function);
7212        UpdateCmdBufferLastCmd(cb_node, CMD_BINDINDEXBUFFER);
7213        VkDeviceSize offset_align = 0;
7214        switch (indexType) {
7215            case VK_INDEX_TYPE_UINT16:
7216                offset_align = 2;
7217                break;
7218            case VK_INDEX_TYPE_UINT32:
7219                offset_align = 4;
7220                break;
7221            default:
7222                // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7223                break;
7224        }
7225        if (!offset_align || (offset % offset_align)) {
7226            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7227                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7228                            "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
7229                            string_VkIndexType(indexType));
7230        }
7231        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7232    } else {
7233        assert(0);
7234    }
7235    lock.unlock();
7236    if (!skip) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7237}
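
// Illustrative sketch (hypothetical helper): the alignment rule enforced above -- the
// offset must be a multiple of the index size, 2 bytes for VK_INDEX_TYPE_UINT16 and
// 4 bytes for VK_INDEX_TYPE_UINT32.
#if 0
static void ExampleBindIndexBuffer(VkCommandBuffer cmd, VkBuffer index_buffer) {
    VkDeviceSize offset = 4;  // multiple of 4, so valid for either index type
    vkCmdBindIndexBuffer(cmd, index_buffer, offset, VK_INDEX_TYPE_UINT32);
}
#endif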
7238
7239void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7240    uint32_t end = firstBinding + bindingCount;
7241    if (pCB->currentDrawData.buffers.size() < end) {
7242        pCB->currentDrawData.buffers.resize(end);
7243    }
7244    for (uint32_t i = 0; i < bindingCount; ++i) {
7245        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7246    }
7247}
7248
7249static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7250
7251VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
7252                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
7253    bool skip = false;
7254    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7255    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7256    std::unique_lock<std::mutex> lock(global_lock);
7257
7258    auto cb_node = GetCBNode(dev_data, commandBuffer);
7259    if (cb_node) {
7260        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01423);
7261        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7262        for (uint32_t i = 0; i < bindingCount; ++i) {
7263            auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
7264            assert(buffer_state);
7265            skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
7266            std::function<bool()> function = [=]() {
7267                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
7268            };
7269            cb_node->validate_functions.push_back(function);
7270        }
7271        UpdateCmdBufferLastCmd(cb_node, CMD_BINDVERTEXBUFFER);
7272        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7273    } else {
7274        assert(0);
7275    }
7276    lock.unlock();
7277    if (!skip) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7278}
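
// Illustrative sketch (hypothetical names): binding two vertex buffers starting at
// binding 1. updateResourceTracking() above records pBuffers[0..1] into
// currentDrawData.buffers[1..2] so subsequent draws can validate them.
#if 0
static void ExampleBindVertexBuffers(VkCommandBuffer cmd, VkBuffer pos_buf, VkBuffer normal_buf) {
    const VkBuffer buffers[2] = {pos_buf, normal_buf};
    const VkDeviceSize offsets[2] = {0, 0};
    vkCmdBindVertexBuffers(cmd, 1 /*firstBinding*/, 2, buffers, offsets);
}
#endif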
7279
7280// Expects global_lock to be held by caller
7281static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7282    for (auto imageView : pCB->updateImages) {
7283        auto view_state = GetImageViewState(dev_data, imageView);
7284        if (!view_state) continue;
7285
7286        auto image_state = GetImageState(dev_data, view_state->create_info.image);
7287        assert(image_state);
7288        std::function<bool()> function = [=]() {
7289            SetImageMemoryValid(dev_data, image_state, true);
7290            return false;
7291        };
7292        pCB->validate_functions.push_back(function);
7293    }
7294    for (auto buffer : pCB->updateBuffers) {
7295        auto buffer_state = GetBufferState(dev_data, buffer);
7296        assert(buffer_state);
7297        std::function<bool()> function = [=]() {
7298            SetBufferMemoryValid(dev_data, buffer_state, true);
7299            return false;
7300        };
7301        pCB->validate_functions.push_back(function);
7302    }
7303}
7304
7305// Generic function to handle validation for all CmdDraw* type functions
7306static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
7307                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
7308                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
7309                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
7310    bool skip = false;
7311    *cb_state = GetCBNode(dev_data, cmd_buffer);
7312    if (*cb_state) {
7313        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
7314        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
7315        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
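        // Graphics draws must be recorded inside a render pass and dispatches outside of
        // one; outsideRenderPass()/insideRenderPass() each flag the disallowed case.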
7316        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
7317                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
7318    }
7319    return skip;
7320}
7321
7322// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
7323static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7324                                           CMD_TYPE cmd_type) {
7325    UpdateDrawState(dev_data, cb_state, bind_point);
7326    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
7327    UpdateCmdBufferLastCmd(cb_state, cmd_type);
7328}
7329
7330// Generic function to handle state update for all CmdDraw* type functions
7331static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7332                                   CMD_TYPE cmd_type) {
7333    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
7334    updateResourceTrackingOnDraw(cb_state);
7335    cb_state->hasDrawCmd = true;
7336}
7337
7338static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
7339                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
7340    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
7341                               VALIDATION_ERROR_01364, VALIDATION_ERROR_01365, VALIDATION_ERROR_02203);
7342}
7343
7344static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
7345    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW);
7346}
7347
7348VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7349                                   uint32_t firstVertex, uint32_t firstInstance) {
7350    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7351    GLOBAL_CB_NODE *cb_state = nullptr;
7352    std::unique_lock<std::mutex> lock(global_lock);
7353    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
7354    lock.unlock();
7355    if (!skip) {
7356        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7357        lock.lock();
7358        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
7359        lock.unlock();
7360    }
7361}
7362
7363static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
7364                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
7365    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
7366                               VALIDATION_ERROR_01371, VALIDATION_ERROR_01372, VALIDATION_ERROR_02216);
7367}
7368
7369static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
7370    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED);
7371}
7372
7373VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
7374                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
7375    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7376    GLOBAL_CB_NODE *cb_state = nullptr;
7377    std::unique_lock<std::mutex> lock(global_lock);
7378    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
7379                                              "vkCmdDrawIndexed()");
7380    lock.unlock();
7381    if (!skip) {
7382        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
7383        lock.lock();
7384        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
7385        lock.unlock();
7386    }
7387}
7388
7389static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
7390                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
7391                                           const char *caller) {
7392    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
7393                                    VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01380, VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
7394    *buffer_state = GetBufferState(dev_data, buffer);
7395    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
7396    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
7397    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
7398    return skip;
7399}
7400
7401static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7402                                          BUFFER_STATE *buffer_state) {
7403    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT);
7404    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
7405}
7406
7407VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
7408                                           uint32_t stride) {
7409    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7410    GLOBAL_CB_NODE *cb_state = nullptr;
7411    BUFFER_STATE *buffer_state = nullptr;
7412    std::unique_lock<std::mutex> lock(global_lock);
7413    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
7414                                               &buffer_state, "vkCmdDrawIndirect()");
7415    lock.unlock();
7416    if (!skip) {
7417        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7418        lock.lock();
7419        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
7420        lock.unlock();
7421    }
7422}
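
// Illustrative sketch (hypothetical helper): the record layout that vkCmdDrawIndirect()
// reads from 'buffer'. Per the TODO above, when the drawIndirectFirstInstance feature is
// not enabled every record's firstInstance must be 0.
#if 0
static void ExampleFillDrawIndirectRecord(void *mapped_indirect_buffer) {
    VkDrawIndirectCommand cmd = {};
    cmd.vertexCount = 3;
    cmd.instanceCount = 1;
    cmd.firstVertex = 0;
    cmd.firstInstance = 0;  // must stay 0 unless drawIndirectFirstInstance is enabled
    memcpy(mapped_indirect_buffer, &cmd, sizeof(cmd));
}
#endif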
7423
7424static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
7425                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
7426                                                  BUFFER_STATE **buffer_state, const char *caller) {
7427    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
7428                                    VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_01392, VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
7429    *buffer_state = GetBufferState(dev_data, buffer);
7430    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
7431    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
7432    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
7433    // 'buffer'.
7434    return skip;
7435}
7436
7437static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7438                                                 BUFFER_STATE *buffer_state) {
7439    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT);
7440    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
7441}
7442
7443VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
7444                                                  uint32_t count, uint32_t stride) {
7445    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7446    GLOBAL_CB_NODE *cb_state = nullptr;
7447    BUFFER_STATE *buffer_state = nullptr;
7448    std::unique_lock<std::mutex> lock(global_lock);
7449    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
7450                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
7451    lock.unlock();
7452    if (!skip) {
7453        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7454        lock.lock();
7455        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
7456        lock.unlock();
7457    }
7458}
7459
7460static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
7461                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
7462    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
7463                               VALIDATION_ERROR_01561, VALIDATION_ERROR_01562, VALIDATION_ERROR_UNDEFINED);
7464}
7465
7466static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
7467    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
7468}
7469
7470VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7471    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7472    GLOBAL_CB_NODE *cb_state = nullptr;
7473    std::unique_lock<std::mutex> lock(global_lock);
7474    bool skip =
7475        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
7476    lock.unlock();
7477    if (!skip) {
7478        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
7479        lock.lock();
7480        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
7481        lock.unlock();
7482    }
7483}
7484
7485static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
7486                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
7487                                               BUFFER_STATE **buffer_state, const char *caller) {
7488    bool skip =
7489        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
7490                            VALIDATION_ERROR_01568, VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
7491    *buffer_state = GetBufferState(dev_data, buffer);
7492    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
7493    return skip;
7494}
7495
7496static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7497                                              BUFFER_STATE *buffer_state) {
7498    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
7499    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
7500}
7501
7502VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7503    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7504    GLOBAL_CB_NODE *cb_state = nullptr;
7505    BUFFER_STATE *buffer_state = nullptr;
7506    std::unique_lock<std::mutex> lock(global_lock);
7507    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
7508                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
7509    lock.unlock();
7510    if (!skip) {
7511        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
7512        lock.lock();
7513        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
7514        lock.unlock();
7515    }
7516}
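
// Illustrative sketch (hypothetical helper): vkCmdDispatchIndirect() reads a single
// VkDispatchIndirectCommand (three uint32_t workgroup counts) at 'offset'; the offset
// must be 4-byte aligned, and the memory-binding requirement is what
// VALIDATION_ERROR_02547 above checks.
#if 0
static void ExampleFillDispatchIndirectRecord(void *mapped_indirect_buffer) {
    VkDispatchIndirectCommand cmd = {8, 8, 1};  // x, y, z workgroup counts
    memcpy(mapped_indirect_buffer, &cmd, sizeof(cmd));
}
#endif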
7517
7518VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7519                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
7520    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7521    std::unique_lock<std::mutex> lock(global_lock);
7522
7523    auto cb_node = GetCBNode(device_data, commandBuffer);
7524    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7525    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7526
7527    if (cb_node && src_buffer_state && dst_buffer_state) {
7528        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7529        if (!skip) {
7530            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7531            lock.unlock();
7532            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7533        }
7534    } else {
7535        lock.unlock();
7536        assert(0);
7537    }
7538}
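
// Illustrative sketch (hypothetical values): a two-region vkCmdCopyBuffer() of the kind
// validated above; srcOffset, dstOffset, and size are all in bytes.
#if 0
static void ExampleCopyBuffer(VkCommandBuffer cmd, VkBuffer src, VkBuffer dst) {
    VkBufferCopy regions[2] = {};
    regions[0].srcOffset = 0;
    regions[0].dstOffset = 0;
    regions[0].size = 256;
    regions[1].srcOffset = 256;
    regions[1].dstOffset = 512;
    regions[1].size = 128;
    vkCmdCopyBuffer(cmd, src, dst, 2, regions);
}
#endif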
7539
7540VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7541                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7542                                        const VkImageCopy *pRegions) {
7543    bool skip = false;
7544    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7545    std::unique_lock<std::mutex> lock(global_lock);
7546
7547    auto cb_node = GetCBNode(device_data, commandBuffer);
7548    auto src_image_state = GetImageState(device_data, srcImage);
7549    auto dst_image_state = GetImageState(device_data, dstImage);
7550    if (cb_node && src_image_state && dst_image_state) {
7551        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7552                                           srcImageLayout, dstImageLayout);
7553        if (!skip) {
7554            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7555                                      dstImageLayout);
7556            lock.unlock();
7557            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7558                                                     pRegions);
7559        }
7560    } else {
7561        lock.unlock();
7562        assert(0);
7563    }
7564}
7565
7566// Validate that an image's sampleCount matches the requirement for a specific API call
7567bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
7568                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
7569    bool skip = false;
7570    if (image_state->createInfo.samples != sample_count) {
7571        skip =
7572            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
7573                    reinterpret_cast<uint64_t &>(image_state->image), __LINE__, msgCode, "DS",
7574                    "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
7575                    reinterpret_cast<uint64_t &>(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
7576                    string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
7577    }
7578    return skip;
7579}
7580
7581VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7582                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7583                                        const VkImageBlit *pRegions, VkFilter filter) {
7584    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7585    std::unique_lock<std::mutex> lock(global_lock);
7586
7587    auto cb_node = GetCBNode(dev_data, commandBuffer);
7588    auto src_image_state = GetImageState(dev_data, srcImage);
7589    auto dst_image_state = GetImageState(dev_data, dstImage);
7590
7591    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);
7592
7593    if (!skip) {
7594        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
7595        lock.unlock();
7596        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7597                                              pRegions, filter);
7598    }
7599}
7600
7601VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
7602                                                VkImageLayout dstImageLayout, uint32_t regionCount,
7603                                                const VkBufferImageCopy *pRegions) {
7604    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7605    std::unique_lock<std::mutex> lock(global_lock);
7606    bool skip = false;
7607    auto cb_node = GetCBNode(device_data, commandBuffer);
7608    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7609    auto dst_image_state = GetImageState(device_data, dstImage);
7610    if (cb_node && src_buffer_state && dst_image_state) {
7611        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
7612                                                   regionCount, pRegions, "vkCmdCopyBufferToImage()");
7613    } else {
7614        lock.unlock();
7615        assert(0);
7616        // TODO: report VU01244 here, or put in object tracker?
        return;  // state pointers are null and the lock was already released above
7617    }
7618    if (!skip) {
7619        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
7620                                          dstImageLayout);
7621        lock.unlock();
7622        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
7623    }
7624}
7625
7626VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7627                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7628    bool skip = false;
7629    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7630    std::unique_lock<std::mutex> lock(global_lock);
7631
7632    auto cb_node = GetCBNode(device_data, commandBuffer);
7633    auto src_image_state = GetImageState(device_data, srcImage);
7634    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7635    if (cb_node && src_image_state && dst_buffer_state) {
7636        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
7637                                                   regionCount, pRegions, "vkCmdCopyImageToBuffer()");
7638    } else {
7639        lock.unlock();
7640        assert(0);
7641        // TODO: report VU01262 here, or put in object tracker?
        return;  // state pointers are null and the lock was already released above
7642    }
7643    if (!skip) {
7644        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
7645                                          srcImageLayout);
7646        lock.unlock();
7647        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
7648    }
7649}
7650
7651VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7652                                           VkDeviceSize dataSize, const uint32_t *pData) {
7653    bool skip = false;
7654    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7655    std::unique_lock<std::mutex> lock(global_lock);
7656
7657    auto cb_node = GetCBNode(dev_data, commandBuffer);
7658    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
7659    if (cb_node && dst_buff_state) {
7660        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
7661        // Update bindings between buffer and cmd buffer
7662        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
7663        // Validate that DST buffer has correct usage flags set
7664        skip |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01146,
7665                                         "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7666        std::function<bool()> function = [=]() {
7667            SetBufferMemoryValid(dev_data, dst_buff_state, true);
7668            return false;
7669        };
7670        cb_node->validate_functions.push_back(function);
7671
7672        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdUpdateBuffer()",
7673                                      VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_01154);
7674        skip |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7675        UpdateCmdBufferLastCmd(cb_node, CMD_UPDATEBUFFER);
7676        skip |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
7677    } else {
7678        assert(0);
7679    }
7680    lock.unlock();
7681    if (!skip) dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7682}
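
// Illustrative sketch (hypothetical values): vkCmdUpdateBuffer() is for small inline
// updates -- per the spec, dstOffset and dataSize must be multiples of 4 and dataSize
// may not exceed 65536 bytes.
#if 0
static void ExampleUpdateBuffer(VkCommandBuffer cmd, VkBuffer dst) {
    const uint32_t payload[4] = {0, 1, 2, 3};
    vkCmdUpdateBuffer(cmd, dst, 0 /*dstOffset*/, sizeof(payload), payload);
}
#endif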
7683
7684VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7685                                         VkDeviceSize size, uint32_t data) {
7686    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7687    std::unique_lock<std::mutex> lock(global_lock);
7688    auto cb_node = GetCBNode(device_data, commandBuffer);
7689    auto buffer_state = GetBufferState(device_data, dstBuffer);
7690
7691    if (cb_node && buffer_state) {
7692        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
7693        if (!skip) {
7694            PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
7695            lock.unlock();
7696            device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7697        }
7698    } else {
7699        lock.unlock();
7700        assert(0);
7701    }
7702}
7703
7704VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7705                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7706                                               const VkClearRect *pRects) {
7707    bool skip = false;
7708    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7709    {
7710        std::lock_guard<std::mutex> lock(global_lock);
7711        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7712    }
7713    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7714}
7715
7716VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7717                                              const VkClearColorValue *pColor, uint32_t rangeCount,
7718                                              const VkImageSubresourceRange *pRanges) {
7719    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7720    std::unique_lock<std::mutex> lock(global_lock);
7721
7722    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7723    if (!skip) {
7724        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARCOLORIMAGE);
7725        lock.unlock();
7726        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7727    }
7728}
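
// Illustrative sketch (hypothetical values): clearing every mip and layer of a color
// image; VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS cover the whole image, and
// the layout must be GENERAL or TRANSFER_DST_OPTIMAL.
#if 0
static void ExampleClearColorImage(VkCommandBuffer cmd, VkImage image) {
    VkClearColorValue color = {{0.0f, 0.0f, 0.0f, 1.0f}};
    VkImageSubresourceRange range = {};
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 0;
    range.levelCount = VK_REMAINING_MIP_LEVELS;
    range.baseArrayLayer = 0;
    range.layerCount = VK_REMAINING_ARRAY_LAYERS;
    vkCmdClearColorImage(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &color, 1, &range);
}
#endif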
7729
7730VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7731                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7732                                                     const VkImageSubresourceRange *pRanges) {
7733    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7734    std::unique_lock<std::mutex> lock(global_lock);
7735
7736    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7737    if (!skip) {
7738        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARDEPTHSTENCILIMAGE);
7739        lock.unlock();
7740        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
7741    }
7742}
7743
7744VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7745                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7746                                           const VkImageResolve *pRegions) {
7747    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7748    std::unique_lock<std::mutex> lock(global_lock);
7749
7750    auto cb_node = GetCBNode(dev_data, commandBuffer);
7751    auto src_image_state = GetImageState(dev_data, srcImage);
7752    auto dst_image_state = GetImageState(dev_data, dstImage);
7753
7754    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
7755
7756    if (!skip) {
7757        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
7758        lock.unlock();
7759        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7760                                                 pRegions);
7761    }
7762}
7763
7764VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
7765                                                     VkSubresourceLayout *pLayout) {
7766    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7767
7768    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
7769    if (!skip) {
7770        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
7771    }
7772}
7773
7774bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7775    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7776    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7777    if (pCB) {
7778        pCB->eventToStageMap[event] = stageMask;
7779    }
7780    auto queue_data = dev_data->queueMap.find(queue);
7781    if (queue_data != dev_data->queueMap.end()) {
7782        queue_data->second.eventToStageMap[event] = stageMask;
7783    }
7784    return false;
7785}
7786
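// CmdSetEvent()/CmdResetEvent() below defer their eventToStageMap updates: the target
// queue is unknown at record time, so a callback bound to this command buffer is pushed
// onto pCB->eventUpdates and replayed at submit time with the actual VkQueue.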
7787VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7788    bool skip = false;
7789    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7790    std::unique_lock<std::mutex> lock(global_lock);
7791    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7792    if (pCB) {
7793        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7794                                      VALIDATION_ERROR_00237);
7795        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7796        UpdateCmdBufferLastCmd(pCB, CMD_SETEVENT);
7797        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
7798        skip |=
7799            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_00230, VALIDATION_ERROR_00231);
7800        auto event_state = GetEventNode(dev_data, event);
7801        if (event_state) {
7802            addCommandBufferBinding(&event_state->cb_bindings,
7803                                    {reinterpret_cast<uint64_t &>(event), kVulkanObjectTypeEvent}, pCB);
7804            event_state->cb_bindings.insert(pCB);
7805        }
7806        pCB->events.push_back(event);
7807        if (!pCB->waitedEvents.count(event)) {
7808            pCB->writeEventsBeforeWait.push_back(event);
7809        }
7810        std::function<bool(VkQueue)> eventUpdate =
7811            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7812        pCB->eventUpdates.push_back(eventUpdate);
7813    }
7814    lock.unlock();
7815    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
7816}
7817
7818VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
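// Illustrative sketch (hypothetical helper): the set/wait pairing tracked by the
// writeEventsBeforeWait bookkeeping above -- the event is signaled after the producer
// stage and waited on before the consumer stage within the same command buffer.
#if 0
static void ExampleSetThenWaitEvent(VkCommandBuffer cmd, VkEvent event) {
    vkCmdSetEvent(cmd, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    vkCmdWaitEvents(cmd, 1, &event, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                    0, nullptr, 0, nullptr, 0, nullptr);
}
#endif
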
7819    bool skip = false;
7820    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7821    std::unique_lock<std::mutex> lock(global_lock);
7822    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7823    if (pCB) {
7824        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7825                                      VALIDATION_ERROR_00248);
7826        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7827        UpdateCmdBufferLastCmd(pCB, CMD_RESETEVENT);
7828        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
7829        skip |=
7830            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_00240, VALIDATION_ERROR_00241);
7831        auto event_state = GetEventNode(dev_data, event);
7832        if (event_state) {
7833            addCommandBufferBinding(&event_state->cb_bindings,
7834                                    {reinterpret_cast<uint64_t &>(event), kVulkanObjectTypeEvent}, pCB);
7835            event_state->cb_bindings.insert(pCB);
7836        }
7837        pCB->events.push_back(event);
7838        if (!pCB->waitedEvents.count(event)) {
7839            pCB->writeEventsBeforeWait.push_back(event);
7840        }
7841        // TODO : Add check for VALIDATION_ERROR_00226
7842        std::function<bool(VkQueue)> eventUpdate =
7843            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7844        pCB->eventUpdates.push_back(eventUpdate);
7845    }
7846    lock.unlock();
7847    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
7848}
7849
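// Illustrative sketch (hypothetical values) for ValidateBarriers() below: a layout
// transition on a VK_SHARING_MODE_EXCLUSIVE image with no queue ownership transfer, so
// both queue family indices are VK_QUEUE_FAMILY_IGNORED and the new layout is neither
// UNDEFINED nor PREINITIALIZED.
#if 0
static VkImageMemoryBarrier ExampleImageBarrier(VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    return barrier;
}
#endif
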
7850static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7851                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7852                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7853                             const VkImageMemoryBarrier *pImageMemBarriers) {
7854    bool skip = false;
7855    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(cmdBuffer), layer_data_map);
7856    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, cmdBuffer);
7857    if (pCB->activeRenderPass && memBarrierCount) {
7858        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7859            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7860                            reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7861                            "%s: Barriers cannot be set during subpass %d "
7862                            "with no self dependency specified.",
7863                            funcName, pCB->activeSubpass);
7864        }
7865    }
7866    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7867        auto mem_barrier = &pImageMemBarriers[i];
7868        auto image_data = GetImageState(dev_data, mem_barrier->image);
7869        if (image_data) {
7870            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7871            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7872            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7873                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7874                // be VK_QUEUE_FAMILY_IGNORED
7875                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7876                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7877                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(cmdBuffer), __LINE__,
7878                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7879                                    "%s: Image Barrier for image 0x%" PRIx64
7880                                    " was created with sharingMode of "
7881                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
7882                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7883                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7884                }
7885            } else {
7886                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7887                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7888                // or both be a valid queue family
7889                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7890                    (src_q_f_index != dst_q_f_index)) {
7891                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7892                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(cmdBuffer), __LINE__,
7893                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7894                                    "%s: Image 0x%" PRIx64
7895                                    " was created with sharingMode "
7896                                    "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7897                                    "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7898                                    "must be.",
7899                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7900                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7901                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7902                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7903                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7904                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(cmdBuffer), __LINE__,
7905                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7906                                    "%s: Image 0x%" PRIx64
7907                                    " was created with sharingMode "
7908                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7909                                    " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
7910                                    " queueFamilies created for this device.",
7911                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
7912                                    dev_data->phys_dev_properties.queue_family_properties.size());
7913                }
7914            }
7915        }
7916
7917        if (mem_barrier) {
7918            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
7919                skip |=
7920                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7921                skip |=
7922                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7923            }
7924            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7925                skip |=
7926                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7927                            reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7928                            "%s: Image Layout cannot be transitioned to UNDEFINED or "
7929                            "PREINITIALIZED.",
7930                            funcName);
7931            }
7932            VkFormat format = VK_FORMAT_UNDEFINED;
7933            uint32_t arrayLayers = 0, mipLevels = 0;
7934            bool imageFound = false;
7935            if (image_data) {
7936                format = image_data->createInfo.format;
7937                arrayLayers = image_data->createInfo.arrayLayers;
7938                mipLevels = image_data->createInfo.mipLevels;
7939                imageFound = true;
7940            } else if (dev_data->device_extensions.khr_swapchain_enabled) {
7941                auto imageswap_data = GetSwapchainFromImage(dev_data, mem_barrier->image);
7942                if (imageswap_data) {
7943                    auto swapchain_data = GetSwapchainNode(dev_data, imageswap_data);
7944                    if (swapchain_data) {
7945                        format = swapchain_data->createInfo.imageFormat;
7946                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
7947                        mipLevels = 1;
7948                        imageFound = true;
7949                    }
7950                }
7951            }
7952            if (imageFound) {
7953                skip |= ValidateImageSubrangeLevelLayerCounts(dev_data, mem_barrier->subresourceRange, funcName);
7954                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
                skip |= ValidateImageAspectMask(dev_data, mem_barrier->image, format, aspect_mask, funcName);

                // Use the locally resolved arrayLayers/mipLevels: image_data is null when the image
                // belongs to a swapchain, so its createInfo must not be dereferenced here.
                uint32_t layer_count = ResolveRemainingLayers(&mem_barrier->subresourceRange, arrayLayers);
7958                if ((mem_barrier->subresourceRange.baseArrayLayer + layer_count) > arrayLayers) {
7959                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7960                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(cmdBuffer), __LINE__,
7961                                    DRAWSTATE_INVALID_BARRIER, "DS",
                                    "%s: The sum of the subresource's baseArrayLayer (%d) and layerCount (%d) must be less "
                                    "than or equal to the total number of layers (%d).",
7964                                    funcName, mem_barrier->subresourceRange.baseArrayLayer, layer_count, arrayLayers);
7965                }
7966
                uint32_t level_count = ResolveRemainingLevels(&mem_barrier->subresourceRange, mipLevels);
7968                if ((mem_barrier->subresourceRange.baseMipLevel + level_count) > mipLevels) {
7969                    skip |= log_msg(
7970                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7971                        reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                        "%s: The sum of the subresource's baseMipLevel (%d) and levelCount (%d) must be less than or equal to "
                        "the total number of levels (%d).",
7974                        funcName, mem_barrier->subresourceRange.baseMipLevel, level_count, mipLevels);
7975                }
7976            }
7977        }
7978    }
7979    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7980        auto mem_barrier = &pBufferMemBarriers[i];
7981        if (pCB->activeRenderPass) {
7982            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7983                            reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7984                            "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7985        }
7986        if (!mem_barrier) continue;
7987
7988        // Validate buffer barrier queue family indices
7989        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7990             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7991            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7992             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7993            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7994                            reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64
                            " has a srcQueueFamilyIndex or dstQueueFamilyIndex that is not less "
                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7998                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7999                            dev_data->phys_dev_properties.queue_family_properties.size());
8000        }
8001
8002        auto buffer_state = GetBufferState(dev_data, mem_barrier->buffer);
8003        if (buffer_state) {
8004            auto buffer_size = buffer_state->requirements.size;
8005            if (mem_barrier->offset >= buffer_size) {
8006                skip |= log_msg(
8007                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8008                    reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8009                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
8010                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8011                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8012            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8013                skip |=
8014                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8015                            reinterpret_cast<uint64_t>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8016                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8017                            " whose sum is greater than total size 0x%" PRIx64 ".",
8018                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8019                            reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8020                            reinterpret_cast<const uint64_t &>(mem_barrier->size), reinterpret_cast<const uint64_t &>(buffer_size));
8021            }
8022        }
8023    }
8024    return skip;
8025}
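
// Illustrative sketch (not part of the layer; handle names cmd_buf/buffer are hypothetical):
// a VkBufferMemoryBarrier that satisfies the offset/size checks above. The rule enforced is
// that offset must be less than the buffer size and, when size is not VK_WHOLE_SIZE,
// offset + size must not exceed it.
//
//     VkBufferMemoryBarrier buf_barrier = {};
//     buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
//     buf_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     buf_barrier.buffer = buffer;       // e.g. created with size 256
//     buf_barrier.offset = 0;
//     buf_barrier.size = VK_WHOLE_SIZE;  // always valid when offset < buffer size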
8026
8027bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
8028                            VkPipelineStageFlags sourceStageMask) {
8029    bool skip = false;
8030    VkPipelineStageFlags stageMask = 0;
8031    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    // The queue lookup is loop-invariant, so hoist it out of the per-event loop
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end()) return false;
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
8036        auto event_data = queue_data->second.eventToStageMap.find(event);
8037        if (event_data != queue_data->second.eventToStageMap.end()) {
8038            stageMask |= event_data->second;
8039        } else {
8040            auto global_event_data = GetEventNode(dev_data, event);
8041            if (!global_event_data) {
8042                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8043                                reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8044                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8045                                reinterpret_cast<const uint64_t &>(event));
8046            } else {
8047                stageMask |= global_event_data->stageMask;
8048            }
8049        }
8050    }
    // TODO: Need to validate that HOST_BIT is only set if vkSetEvent was called,
    // but vkSetEvent can be called at any time.
8053    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8054        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8055                        reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00254, "DS",
                        "Submitting cmdbuffer with call to vkCmdWaitEvents "
                        "using srcStageMask 0x%X, which must be the bitwise "
                        "OR of the stageMask parameters used in calls to "
                        "vkCmdSetEvent (and VK_PIPELINE_STAGE_HOST_BIT, if "
                        "used with vkSetEvent), but is instead 0x%X. %s",
8061                        sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
8062    }
8063    return skip;
8064}
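
// Illustrative sketch (hypothetical handles): the accumulation rule enforced above. After
//     vkCmdSetEvent(cmd_buf, event_a, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cmd_buf, event_b, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
// a wait on both events must pass the bitwise OR of the set masks as srcStageMask:
//     VkEvent events[2] = {event_a, event_b};
//     vkCmdWaitEvents(cmd_buf, 2, events,
//                     VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,  // srcStageMask
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,                                  // dstStageMask
//                     0, nullptr, 0, nullptr, 0, nullptr);
// VK_PIPELINE_STAGE_HOST_BIT may additionally be OR'd in when vkSetEvent was used from the host.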
8065
// Note that we only check bits that HAVE required queue flags -- don't-care entries are skipped
8067static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
8068    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8069    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8070    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8071    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8072    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8073    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8074    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8075    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8076    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8077    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8078    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8079    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
8080    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
8081    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
8082
8083static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
8084                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
8085                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
8086                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
8087                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
8088                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
8089                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
8090                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
8091                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
8092                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
8093                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
8094                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
8095                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
8096                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
8097
8098bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
8099                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
8100                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
8101    bool skip = false;
    // Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
8103    for (const auto &item : stage_flag_bit_array) {
8104        if (stage_mask & item) {
8105            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
8106                skip |=
8107                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8108                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
8109                            "%s(): %s flag %s is not compatible with the queue family properties of this "
8110                            "command buffer. %s",
8111                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
8112                            validation_error_map[error_code]);
8113            }
8114        }
8115    }
8116    return skip;
8117}
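
// Illustrative note: on a compute-only queue family (queueFlags == VK_QUEUE_COMPUTE_BIT),
// a stage mask containing VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT fails the table lookup above,
// since fragment shading requires VK_QUEUE_GRAPHICS_BIT, while
// VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT or VK_PIPELINE_STAGE_TRANSFER_BIT passes. Stage bits
// absent from the table are treated as don't-care and skipped.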
8118
8119bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
8120                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
8121                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
8122    bool skip = false;
8123    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
8124    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
8125    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
8126
8127    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
8128    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
8129    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
8130
8131    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
8132        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
8133
8134        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8135            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
8136                                                     function, "srcStageMask", error_code);
8137        }
8138        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8139            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
8140                                                     function, "dstStageMask", error_code);
8141        }
8142    }
8143    return skip;
8144}
8145
8146VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
8147                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
8148                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8149                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8150                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8151    bool skip = false;
8152    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8153    std::unique_lock<std::mutex> lock(global_lock);
8154    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8155    if (cb_state) {
8156        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
8157                                                           VALIDATION_ERROR_02510);
8158        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02067,
8159                                             VALIDATION_ERROR_02069);
8160        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02068,
8161                                             VALIDATION_ERROR_02070);
8162        auto first_event_index = cb_state->events.size();
8163        for (uint32_t i = 0; i < eventCount; ++i) {
8164            auto event_state = GetEventNode(dev_data, pEvents[i]);
8165            if (event_state) {
8166                addCommandBufferBinding(&event_state->cb_bindings,
8167                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), kVulkanObjectTypeEvent},
8168                                        cb_state);
8169                event_state->cb_bindings.insert(cb_state);
8170            }
8171            cb_state->waitedEvents.insert(pEvents[i]);
8172            cb_state->events.push_back(pEvents[i]);
8173        }
8174        std::function<bool(VkQueue)> event_update =
8175            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
8176        cb_state->eventUpdates.push_back(event_update);
8177        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8178                                      VALIDATION_ERROR_00262);
8179        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8180        UpdateCmdBufferLastCmd(cb_state, CMD_WAITEVENTS);
8181        skip |=
8182            ValidateBarriersToImages(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
8183        if (!skip) {
8184            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8185        }
8186
8187        skip |= ValidateBarriers("vkCmdWaitEvents()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8188                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8189    }
8190    lock.unlock();
8191    if (!skip)
8192        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8193                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8194                                               imageMemoryBarrierCount, pImageMemoryBarriers);
8195}
8196
8197static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
8198                                              VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8199                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8200                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8201                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8202    bool skip = false;
8203    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
8204                                                       VALIDATION_ERROR_02513);
8205    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
8206                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_00280);
8207    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8208    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00265,
8209                                         VALIDATION_ERROR_00267);
8210    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00266,
8211                                         VALIDATION_ERROR_00268);
8212    skip |= ValidateBarriersToImages(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers,
8213                                     "vkCmdPipelineBarrier()");
8214    skip |= ValidateBarriers("vkCmdPipelineBarrier()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8215                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8216    return skip;
8217}
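
// Illustrative sketch (hypothetical handles cmd_buf/image): an image memory barrier that
// passes the checks above -- a defined newLayout, queue families left as IGNORED, and a
// subresource range within the image's mip/layer counts.
//
//     VkImageMemoryBarrier img_barrier = {};
//     img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     img_barrier.srcAccessMask = 0;
//     img_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;             // valid as an oldLayout
//     img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;  // must not be UNDEFINED/PREINITIALIZED
//     img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     img_barrier.image = image;
//     img_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &img_barrier);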
8218
8219static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
8220                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8221    UpdateCmdBufferLastCmd(cb_state, CMD_PIPELINEBARRIER);
8222    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8223}
8224
8225VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8226                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8227                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8228                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8229                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8230    bool skip = false;
8231    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8232    std::unique_lock<std::mutex> lock(global_lock);
8233    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
8234    if (cb_state) {
8235        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, commandBuffer, srcStageMask, dstStageMask,
8236                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8237                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8238        if (!skip) {
8239            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8240        }
8241    } else {
8242        assert(0);
8243    }
8244    lock.unlock();
8245    if (!skip) {
8246        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
8247                                                       pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8248                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
8249    }
8250}
8251
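// Deferred query-state update: rather than marking a query's state at record time, the layer
// binds this callback into the command buffer's queryUpdates list and runs it when the command
// buffer is submitted to a queue, keeping per-queue and global query state in sync with
// execution order. The bool return feeds the shared "skip" accumulation and is always false here.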
8252bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8253    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8254    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8255    if (pCB) {
8256        pCB->queryToStateMap[object] = value;
8257    }
8258    auto queue_data = dev_data->queueMap.find(queue);
8259    if (queue_data != dev_data->queueMap.end()) {
8260        queue_data->second.queryToStateMap[object] = value;
8261    }
8262    return false;
8263}
8264
8265VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8266    bool skip = false;
8267    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8268    std::unique_lock<std::mutex> lock(global_lock);
8269    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8270    if (pCB) {
8271        QueryObject query = {queryPool, slot};
8272        pCB->activeQueries.insert(query);
8273        if (!pCB->startedQueries.count(query)) {
8274            pCB->startedQueries.insert(query);
8275        }
8276        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8277                                      VALIDATION_ERROR_01039);
8278        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8279        UpdateCmdBufferLastCmd(pCB, CMD_BEGINQUERY);
8280        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8281                                {reinterpret_cast<uint64_t &>(queryPool), kVulkanObjectTypeQueryPool}, pCB);
8282    }
8283    lock.unlock();
8284    if (!skip) dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8285}
8286
8287VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8288    bool skip = false;
8289    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8290    std::unique_lock<std::mutex> lock(global_lock);
8291    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8292    if (cb_state) {
8293        QueryObject query = {queryPool, slot};
8294        if (!cb_state->activeQueries.count(query)) {
8295            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8296                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_01041, "DS",
8297                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s", (uint64_t)(queryPool),
8298                            slot, validation_error_map[VALIDATION_ERROR_01041]);
8299        } else {
8300            cb_state->activeQueries.erase(query);
8301        }
8302        std::function<bool(VkQueue)> query_update = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8303        cb_state->queryUpdates.push_back(query_update);
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_01046);
        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
8307        UpdateCmdBufferLastCmd(cb_state, CMD_ENDQUERY);
8308        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8309                                {reinterpret_cast<uint64_t &>(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
8310    }
8311    lock.unlock();
8312    if (!skip) dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
8313}
8314
8315VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8316                                             uint32_t queryCount) {
8317    bool skip = false;
8318    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8319    std::unique_lock<std::mutex> lock(global_lock);
8320    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8321    if (cb_state) {
8322        for (uint32_t i = 0; i < queryCount; i++) {
8323            QueryObject query = {queryPool, firstQuery + i};
8324            cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
8325            std::function<bool(VkQueue)> query_update =
8326                std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
8327            cb_state->queryUpdates.push_back(query_update);
8328        }
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_01024);
        skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8332        UpdateCmdBufferLastCmd(cb_state, CMD_RESETQUERYPOOL);
8333        skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
8334        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8335                                {reinterpret_cast<uint64_t &>(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
8336    }
8337    lock.unlock();
8338    if (!skip) dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8339}
8340
8341bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
8342    bool skip = false;
8343    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8344    auto queue_data = dev_data->queueMap.find(queue);
8345    if (queue_data == dev_data->queueMap.end()) return false;
8346    for (uint32_t i = 0; i < queryCount; i++) {
8347        QueryObject query = {queryPool, firstQuery + i};
8348        auto query_data = queue_data->second.queryToStateMap.find(query);
8349        bool fail = false;
8350        if (query_data != queue_data->second.queryToStateMap.end()) {
8351            if (!query_data->second) {
8352                fail = true;
8353            }
8354        } else {
8355            auto global_query_data = dev_data->queryToStateMap.find(query);
8356            if (global_query_data != dev_data->queryToStateMap.end()) {
8357                if (!global_query_data->second) {
8358                    fail = true;
8359                }
8360            } else {
8361                fail = true;
8362            }
8363        }
8364        if (fail) {
8365            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8366                            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8367                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8368                            reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8369        }
8370    }
8371    return skip;
8372}
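
// Illustrative sketch (hypothetical handles): the ordering these checks enforce. A query must
// have been ended (or a timestamp written) before its results are copied on the same queue:
//
//     vkCmdBeginQuery(cmd_buf, query_pool, 0 /*query*/, 0 /*flags*/);
//     // ... draws or dispatches being measured ...
//     vkCmdEndQuery(cmd_buf, query_pool, 0);
//     vkCmdCopyQueryPoolResults(cmd_buf, query_pool, 0 /*firstQuery*/, 1 /*queryCount*/,
//                               dst_buffer, 0 /*dstOffset*/, sizeof(uint64_t) /*stride*/,
//                               VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);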
8373
8374VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8375                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8376                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
8377    bool skip = false;
8378    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8379    std::unique_lock<std::mutex> lock(global_lock);
8380
8381    auto cb_node = GetCBNode(dev_data, commandBuffer);
8382    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
8383    if (cb_node && dst_buff_state) {
8384        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
8385        // Update bindings between buffer and cmd buffer
8386        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8387        // Validate that DST buffer has correct usage flags set
8388        skip |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
8389                                         "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8390        std::function<bool()> function = [=]() {
8391            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8392            return false;
8393        };
8394        cb_node->validate_functions.push_back(function);
8395        std::function<bool(VkQueue)> query_update =
8396            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
8397        cb_node->queryUpdates.push_back(query_update);
8398        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
8399                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_01073);
8400        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8401        UpdateCmdBufferLastCmd(cb_node, CMD_COPYQUERYPOOLRESULTS);
8402        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
8403        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8404                                {reinterpret_cast<uint64_t &>(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
8405    } else {
8406        assert(0);
8407    }
8408    lock.unlock();
8409    if (!skip)
8410        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
8411                                                         stride, flags);
8412}
8413
8414VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
8415                                            uint32_t offset, uint32_t size, const void *pValues) {
8416    bool skip = false;
8417    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8418    std::unique_lock<std::mutex> lock(global_lock);
8419    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8420    if (cb_state) {
8421        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8422                                      VALIDATION_ERROR_00999);
8423        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8424        UpdateCmdBufferLastCmd(cb_state, CMD_PUSHCONSTANTS);
8425    }
8426    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8427    if (0 == stageFlags) {
8428        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8429                        reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00996, "DS",
8430                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_00996]);
8431    }
8432
8433    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
8434    // The spec doesn't seem to disallow having multiple push constant ranges with the
8435    // same offset and size, but different stageFlags.  So we can't just check the
8436    // stageFlags in the first range with matching offset and size.
8437    if (!skip) {
8438        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
8439        bool found_matching_range = false;
8440        for (const auto &range : ranges) {
8441            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
8442                found_matching_range = true;
8443                break;
8444            }
8445        }
8446        if (!found_matching_range) {
8447            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8448                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00988, "DS",
8449                            "vkCmdPushConstants() stageFlags = 0x%" PRIx32
8450                            " do not match the stageFlags in any of the ranges with"
8451                            " offset = %d and size = %d in pipeline layout 0x%" PRIx64 ". %s",
8452                            (uint32_t)stageFlags, offset, size, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
8453        }
8454    }
8455    lock.unlock();
8456    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8457}
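
// Illustrative sketch (hypothetical handles): the range/stageFlags matching validated above.
// Given a pipeline layout created with
//     VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT, 0 /*offset*/, 16 /*size*/};
// this update succeeds because stageFlags match exactly and [offset, offset + size) falls
// inside the declared range:
//     float values[4] = {0.f, 0.f, 0.f, 1.f};
//     vkCmdPushConstants(cmd_buf, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(values), values);
// Using VK_SHADER_STAGE_FRAGMENT_BIT instead would trigger VALIDATION_ERROR_00988 above: the
// stageFlags must match those of a declared range covering the offset/size, not merely overlap one.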
8458
8459VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
8460                                             VkQueryPool queryPool, uint32_t slot) {
8461    bool skip = false;
8462    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8463    std::unique_lock<std::mutex> lock(global_lock);
8464    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8465    if (cb_state) {
8466        QueryObject query = {queryPool, slot};
8467        std::function<bool(VkQueue)> query_update = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8468        cb_state->queryUpdates.push_back(query_update);
8469        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8470                                      VALIDATION_ERROR_01082);
8471        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8472        UpdateCmdBufferLastCmd(cb_state, CMD_WRITETIMESTAMP);
8473    }
8474    lock.unlock();
8475    if (!skip) dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8476}
8477
8478static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8479                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
8480                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
8481    bool skip = false;
8482
8483    for (uint32_t attach = 0; attach < count; attach++) {
8484        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8485            // Attachment counts are verified elsewhere, but prevent an invalid access
8486            if (attachments[attach].attachment < fbci->attachmentCount) {
8487                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8488                auto view_state = GetImageViewState(dev_data, *image_view);
8489                if (view_state) {
                    // Fetch the image state before taking &state->createInfo: dereferencing a null
                    // state would be undefined behavior that the old (ici != nullptr) check missed.
                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
8492                        if ((ici->usage & usage_flag) == 0) {
8493                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8494                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
8495                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
8496                                            "IMAGE_USAGE flags (%s). %s",
8497                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
8498                                            validation_error_map[error_code]);
8499                        }
8500                    }
8501                }
8502            }
8503        }
8504    }
8505    return skip;
8506}
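
// Illustrative note: for an image view used as a color attachment of a framebuffer, the
// underlying image must have been created with the matching usage bit, e.g.
//     image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
// MatchUsage() reports VALIDATION_ERROR_00405/00406/00407 when the color, depth/stencil, or
// input attachment usage bit, respectively, is missing from the image's usage flags.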
8507
8508// Validate VkFramebufferCreateInfo which includes:
8509// 1. attachmentCount equals renderPass attachmentCount
8510// 2. corresponding framebuffer and renderpass attachments have matching formats
8511// 3. corresponding framebuffer and renderpass attachments have matching sample counts
8512// 4. fb attachments only have a single mip level
8513// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
8515// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8516// 8. fb dimensions are within physical device limits
8517static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8518    bool skip = false;
8519
8520    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
8521    if (rp_state) {
8522        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
8523        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8524            skip |= log_msg(
8525                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8526                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
8527                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
8528                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
8529                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
8530                validation_error_map[VALIDATION_ERROR_00404]);
8531        } else {
8532            // attachmentCounts match, so make sure corresponding attachment details line up
8533            const VkImageView *image_views = pCreateInfo->pAttachments;
8534            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                if (!view_state) continue;  // Defensive: skip unknown/destroyed image views
                auto &ivci = view_state->create_info;
8537                if (ivci.format != rpci->pAttachments[i].format) {
8538                    skip |= log_msg(
8539                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8540                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
8541                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
8542                        "the format of "
8543                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
8544                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
8545                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
8546                }
                auto image_state = GetImageState(dev_data, ivci.image);
                if (!image_state) continue;  // Defensive: skip views of unknown/destroyed images
                const VkImageCreateInfo *ici = &image_state->createInfo;
8548                if (ici->samples != rpci->pAttachments[i].samples) {
8549                    skip |= log_msg(
8550                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8551                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
8552                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
8553                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
8554                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8555                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
8556                }
8557                // Verify that view only has a single mip level
8558                if (ivci.subresourceRange.levelCount != 1) {
8559                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8560                                    0, __LINE__, VALIDATION_ERROR_00411, "DS",
8561                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                    "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
8563                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
8564                }
8565                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
8566                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8567                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8568                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8569                    (mip_height < pCreateInfo->height)) {
8570                    skip |=
8571                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8572                                __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8573                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
8574                                "than the corresponding "
8575                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
8576                                "dimensions for "
8577                                "attachment #%u, framebuffer:\n"
8578                                "width: %u, %u\n"
8579                                "height: %u, %u\n"
8580                                "layerCount: %u, %u\n",
8581                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8582                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
8583                }
8584                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
8585                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
8586                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
8587                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
8588                    skip |= log_msg(
8589                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8590                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
8592                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
8593                        "r swizzle = %s\n"
8594                        "g swizzle = %s\n"
8595                        "b swizzle = %s\n"
8596                        "a swizzle = %s\n"
8597                        "%s",
8598                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
8599                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
8600                        validation_error_map[VALIDATION_ERROR_00412]);
8601                }
8602            }
8603        }
8604        // Verify correct attachment usage flags
8605        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8606            // Verify input attachments:
8607            skip |=
8608                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
8609                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
8610            // Verify color attachments:
8611            skip |=
8612                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
8613                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
8614            // Verify depth/stencil attachments:
8615            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8616                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8617                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
8618            }
8619        }
8620    }
8621    // Verify FB dimensions are within physical device limits
8622    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
8623        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8624                        VALIDATION_ERROR_00413, "DS",
8625                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
8626                        "Requested width: %u, device max: %u\n"
8627                        "%s",
8628                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
8629                        validation_error_map[VALIDATION_ERROR_00413]);
8630    }
8631    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
8632        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8633                        VALIDATION_ERROR_00414, "DS",
8634                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
8635                        "Requested height: %u, device max: %u\n"
8636                        "%s",
8637                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
8638                        validation_error_map[VALIDATION_ERROR_00414]);
8639    }
8640    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
8641        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8642                        VALIDATION_ERROR_00415, "DS",
8643                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
8644                        "Requested layers: %u, device max: %u\n"
8645                        "%s",
8646                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
8647                        validation_error_map[VALIDATION_ERROR_00415]);
8648    }
8649    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width == 0) {
8651        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8652                        VALIDATION_ERROR_02806, "DS",
8653                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
8654                        validation_error_map[VALIDATION_ERROR_02806]);
8655    }
    if (pCreateInfo->height == 0) {
8657        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8658                        VALIDATION_ERROR_02807, "DS",
8659                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
8660                        validation_error_map[VALIDATION_ERROR_02807]);
8661    }
    if (pCreateInfo->layers == 0) {
8663        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8664                        VALIDATION_ERROR_02808, "DS",
8665                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
8666                        validation_error_map[VALIDATION_ERROR_02808]);
8667    }
8668    return skip;
8669}
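
// Illustrative sketch (hypothetical handles render_pass/color_view): a VkFramebufferCreateInfo
// satisfying the checks above -- attachmentCount equal to the render pass's, dimensions nonzero
// and within device limits, and each attachment view single-mip with identity swizzle:
//
//     VkFramebufferCreateInfo fb_info = {};
//     fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
//     fb_info.renderPass = render_pass;    // created with 1 attachment
//     fb_info.attachmentCount = 1;
//     fb_info.pAttachments = &color_view;  // format/samples match the render pass attachment
//     fb_info.width = 1024;
//     fb_info.height = 768;
//     fb_info.layers = 1;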
8670
8671// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8672//  Return true if an error is encountered and callback returns true to skip call down chain
8673//   false indicates that call down chain should proceed
8674static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8675    // TODO : Verify that renderPass FB is created with is compatible with FB
8676    bool skip = false;
8677    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8678    return skip;
8679}
8680
8681// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
8682static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8683    // Shadow create info and store in map
8684    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
8685        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
8686
8687    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8688        VkImageView view = pCreateInfo->pAttachments[i];
8689        auto view_state = GetImageViewState(dev_data, view);
8690        if (!view_state) {
8691            continue;
8692        }
8693        MT_FB_ATTACHMENT_INFO fb_info;
8694        fb_info.mem = GetImageState(dev_data, view_state->create_info.image)->binding.mem;
8695        fb_info.view_state = view_state;
8696        fb_info.image = view_state->create_info.image;
8697        fb_state->attachments.push_back(fb_info);
8698    }
8699    dev_data->frameBufferMap[fb] = std::move(fb_state);
8700}
8701
8702VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8703                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
8704    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8705    std::unique_lock<std::mutex> lock(global_lock);
8706    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
8707    lock.unlock();
8708
8709    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
8710
8711    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8712
8713    if (VK_SUCCESS == result) {
8714        lock.lock();
8715        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
8716        lock.unlock();
8717    }
8718    return result;
8719}
8720
8721static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
8722                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
8724    if (processed_nodes.count(index)) return false;
8725    processed_nodes.insert(index);
8726    const DAGNode &node = subpass_to_node[index];
8727    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8728    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8729        for (auto elem : node.prev) {
8730            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
8731        }
8732    } else {
8733        return true;
8734    }
8735    return false;
8736}
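
// Illustrative note: FindDependency() is a depth-first reachability walk over the subpass DAG.
// For a chain 0 -> 1 -> 2 (node.prev of 2 contains 1, node.prev of 1 contains 0), asking
// whether subpass 2 depends on subpass 0 recurses 2 -> 1 -> 0 and returns true;
// processed_nodes guards against revisiting shared predecessors.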
8737
8738static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
8739                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
8740                                  bool &skip) {
8741    bool result = true;
8742    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8743    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8744        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
8745        const DAGNode &node = subpass_to_node[subpass];
8746        // Check for a specified dependency between the two nodes. If one exists we are done.
8747        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8748        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8749        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If neither is found, report an error.
8751            std::unordered_set<uint32_t> processed_nodes;
8752            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8753                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8754                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8755                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8756                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8757                                dependent_subpasses[k]);
8758                result = false;
8759            }
8760        }
8761    }
8762    return result;
8763}
8764
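// Recursively walk backwards from subpass 'index' looking for a use of 'attachment'.
// Returns true if this subpass references the attachment (color, input, or depth/stencil)
// or if any predecessor on a 'prev' path does. When an earlier subpass used the attachment
// but this intermediate subpass (depth > 0) neither uses nor preserves it, an error is
// logged, since the attachment's contents would be undefined for the later reader.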
static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

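// Two half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2) overlap
// iff each one starts before the other ends. For example, isRangeOverlapping(0u, 4u, 2u, 6u)
// is true because [0,4) and [2,8) share [2,4), while isRangeOverlapping(0u, 2u, 2u, 2u) is
// false because [0,2) and [2,4) only touch at their boundary.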
template <class T>
bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Symmetric half-open interval test: the ranges overlap iff each starts before the
    // other ends. This also covers the cases where one range exactly matches or fully
    // contains the other.
    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

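// Validate the subpass dependency graph of 'renderPass' against the attachments actually
// bound in 'framebuffer'. Attachments are considered aliases of one another when they use
// the same image view, when their views reference overlapping subresource ranges of the
// same image, or when their images are bound to overlapping ranges of the same memory.
// Aliased attachments must set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT, and every pair of
// subpasses with a read-after-write or write-after-write hazard needs a dependency.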
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = GetImageViewState(dev_data, viewi);
            auto view_state_j = GetImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                            reinterpret_cast<const uint64_t &>(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_00324, "DS",
                            "Attachment %d aliases attachment %d but doesn't "
                            "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                            attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                            reinterpret_cast<const uint64_t &>(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_00324, "DS",
                            "Attachment %d aliases attachment %d but doesn't "
                            "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                            other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // Where a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved
    // by every intermediate pass between the one that wrote it and this one.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
        }
    }
    return skip;
}

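// Build the subpass DAG from the render pass's dependency list: each VkSubpassDependency
// becomes a prev/next edge pair, VK_SUBPASS_EXTERNAL endpoints contribute no edges, and
// srcSubpass == dstSubpass is recorded in 'has_self_dependency'. Dependencies pointing
// backwards (srcSubpass > dstSubpass) are rejected.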
static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip;
}

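// Unless shader validation is disabled, run the SPIRV-Tools validator over the incoming
// module before handing it to the driver. Invalid modules are rejected, except that when
// VK_NV_glsl_shader is enabled a blob whose first word is not the SPIR-V magic number is
// passed through untouched (it may be GLSL that the driver accepts directly).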
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    spv_result_t spv_valid = SPV_SUCCESS;

    if (!GetDisables(dev_data)->shader_validation) {
        // Use SPIRV-Tools validator to try and catch any issues with the module itself
        spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
        spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
        spv_diagnostic diag = nullptr;

        spv_valid = spvValidate(ctx, &binary, &diag);
        if (spv_valid != SPV_SUCCESS) {
            if (!dev_data->device_extensions.nv_glsl_shader_enabled || (pCreateInfo->pCode[0] == spv::MagicNumber)) {
                skip |= log_msg(dev_data->report_data,
                                spv_valid == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                                "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
            }
        }

        spvDiagnosticDestroy(diag);
        spvContextDestroy(ctx);

        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS && !GetDisables(dev_data)->shader_validation) {
        std::lock_guard<std::mutex> lock(global_lock);
        const auto new_shader_module = (SPV_SUCCESS == spv_valid ? new shader_module(pCreateInfo) : new shader_module());
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new_shader_module);
    }
    return res;
}

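// Verify that an attachment reference is either VK_ATTACHMENT_UNUSED or a valid index into
// the render pass's attachment array; 'type' names the reference kind (e.g. "Color") for
// the error message.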
static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_00325, "DS",
                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
                        attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
    }
    return skip;
}

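// A nonzero value is a power of two iff exactly one bit is set, i.e. x & (x - 1) == 0.
// VkSampleCountFlagBits values are single bits, so OR-ing the sample counts of several
// attachments yields a power of two exactly when they all agree; e.g.
// VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_1_BIT == 0x5, which is not a power of two.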
static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }

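// Walk every subpass of the proposed render pass and check its attachment references:
// graphics-only bind points, in-range attachment indices, no VK_ATTACHMENT_UNUSED preserve
// entries, resolve targets that are single-sampled with multisampled sources, and a
// consistent sample count across all color and depth/stencil attachments of each subpass.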
static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00347, "DS",
                            "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
                            validation_error_map[VALIDATION_ERROR_00347]);
        }
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_00356, "DS",
                                "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
                                validation_error_map[VALIDATION_ERROR_00356]);
            } else {
                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
            }
        }

        auto subpass_performs_resolve =
            subpass.pResolveAttachments &&
            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_00352, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
                                    "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
                                    i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
                                    validation_error_map[VALIDATION_ERROR_00352]);
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_00351, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
                                    "which has VK_SAMPLE_COUNT_1_BIT. %s",
                                    i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00337, "DS",
                            "CreateRenderPass:  Subpass %u attempts to render to "
                            "attachments with inconsistent sample counts. %s",
                            i, validation_error_map[VALIDATION_ERROR_00337]);
        }
    }
    return skip;
}

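// Record whether the first use of attachment 'index' within the render pass is a read;
// later uses do not overwrite the entry. CmdBeginRenderPass uses this to decide which
// attachments must already contain valid data when the pass begins.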
static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
    if (index == VK_ATTACHMENT_UNUSED) return;

    if (!render_pass->attachment_first_read.count(index))
        render_pass->attachment_first_read[index] = is_read;
}

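// CreateRenderPass hook: validate attachment usage, dependency stage masks, and layouts up
// front, then (on success) build the subpass DAG and record per-attachment first-use
// information in the RENDER_PASS_STATE tracked for the new handle.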
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
                                             VALIDATION_ERROR_00368, VALIDATION_ERROR_00370);
        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
                                             VALIDATION_ERROR_00369, VALIDATION_ERROR_00371);
    }
    if (!skip) {
        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);

                // resolve attachments are considered to be written
                if (subpass.pResolveAttachments) {
                    MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
                }
            }
            if (subpass.pDepthStencilAttachment) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

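// Commands such as vkCmdBeginRenderPass may only be recorded into primary command buffers;
// log 'error_code' when 'pCB' was allocated at the secondary level.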
static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, error_code, "DS",
                        "Cannot execute command %s on a secondary command buffer. %s", cmd_name,
                        validation_error_map[error_code]);
    }
    return skip;
}

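// Check that the renderArea passed to vkCmdBeginRenderPass lies entirely within the
// dimensions of the framebuffer it will render into.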
static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip;
}

// Returns true if 'op' is the effective load/store op for the given format: for a
// stencil-only format only the stencil[Load|Store]Op applies, for a depth or color format
// only the [load|store]Op applies, and for a combined depth/stencil format either applies.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) ||
            (check_stencil_load_op && (stencil_op == op)));
}

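// CmdBeginRenderPass hook. Beyond the usual primary-buffer/queue-flag/outside-render-pass
// checks, this verifies that pClearValues covers the highest LOAD_OP_CLEAR attachment,
// that the renderArea fits the framebuffer, and that the framebuffer's attachments satisfy
// the render pass's dependency rules; it then records the active render pass state on the
// command buffer and queues per-attachment memory-validity callbacks based on each
// attachment's load op.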
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                              VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (cb_node) {
        if (render_pass_state) {
            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    clear_op_size = static_cast<uint32_t>(i) + 1;
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
                if (render_pass_state->attachment_first_read[i]) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
            }
            if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<uint64_t &>(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
                    "be at least %u entries in the pClearValues array to account for the attachments in renderPass "
                    "0x%" PRIx64
                    " that use VK_ATTACHMENT_LOAD_OP_CLEAR, which require %u entries. Note that the pClearValues array "
                    "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
                    "attachments that aren't cleared they will be ignored. %s",
                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(render_pass_state->renderPass),
                    clear_op_size, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
            }
            if (clear_op_size < pRenderPassBegin->clearValueCount) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<uint64_t &>(render_pass_state->renderPass), __LINE__,
                    DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only the first %u "
                    "entries in pClearValues array are used. The highest index of any attachment in renderPass 0x%" PRIx64
                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.",
                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(render_pass_state->renderPass),
                    clear_op_size - 1);
            }
            skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
                                                          GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
            skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
            skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00441);
            skip |=
                ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_00439);
            skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            UpdateCmdBufferLastCmd(cb_node, CMD_BEGINRENDERPASS);
            cb_node->activeRenderPass = render_pass_state;
            // This is a shallow copy as that is all that is needed for now
            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
            cb_node->activeSubpass = 0;
            cb_node->activeSubpassContents = contents;
            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer and its children to this cmdBuffer
            AddFramebufferBinding(dev_data, cb_node, framebuffer);
            // transition attachments to the correct layouts for beginning of renderPass and first subpass
            TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
        }
    }
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00459);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_00457);
        skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        UpdateCmdBufferLastCmd(pCB, CMD_NEXTSUBPASS);
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);

        // Guard against a null dereference when vkCmdNextSubpass() is recorded outside a
        // render pass; outsideRenderPass() has already flagged that error above.
        if (pCB->activeRenderPass) {
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                __LINE__, VALIDATION_ERROR_00453, "DS",
                                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_00453]);
            }
        }
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
                                 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = GetCBNode(dev_data, commandBuffer);
    FRAMEBUFFER_STATE *framebuffer = NULL;
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), __LINE__,
                                VALIDATION_ERROR_00460, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_00460]);
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00464);
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00465);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_00463);
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        UpdateCmdBufferLastCmd(pCB, CMD_ENDRENDERPASS);
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

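// Shared error reporter for the render pass compatibility checks below: 'msg' describes
// how attachment 'primaryAttach' of the primary command buffer's render pass differs from
// attachment 'secondaryAttach' of the secondary's.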
static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   reinterpret_cast<uint64_t>(secondaryBuffer), __LINE__, VALIDATION_ERROR_02059, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
                   " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer current render pass. "
                   "Attachment %u is not compatible with %u: %s. %s",
                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
                   validation_error_map[VALIDATION_ERROR_02059]);
}

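// Two attachment references are compatible when both are VK_ATTACHMENT_UNUSED or when the
// attachments they point at have the same format and sample count (and, for multi-subpass
// render passes, the same flags). Out-of-range references are treated as unused.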
static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The first is unused while the second is not.");
        return skip;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The second is unused while the first is not.");
        return skip;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip;
}

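// Compare subpass 'subpass' of the two render passes element by element: input, color,
// resolve, and depth/stencil references are each checked with
// validateAttachmentCompatibility(), padding the shorter attachment list with
// VK_ATTACHMENT_UNUSED so that a length mismatch is reported per element.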
static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, secondaryBuffer,
                                            secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip;
}

// Verify that the renderPass CreateInfos for the primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u.",
                        reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
                        reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                 primaryPassCI->subpassCount > 1);
        }
    }
    return skip;
}

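// When a secondary command buffer inherits a framebuffer, it must match the primary's
// active framebuffer, and the secondary's inherited render pass must be compatible with
// the render pass that framebuffer was created against.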
static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(primaryBuffer), __LINE__, VALIDATION_ERROR_02060, "DS",
                            "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                            " which has a framebuffer 0x%" PRIx64
                            " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
                            reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
                            reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
        }
        auto fb = GetFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid framebuffer 0x%" PRIx64 ".",
                            (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip;
        }
        auto cb_renderpass = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
            skip |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                    cb_renderpass->createInfo.ptr());
        }
    }
    return skip;
}

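// Cross-check query state and command pool provenance before a secondary buffer is
// executed: the secondary's inherited pipelineStatistics must be a subset of what the
// active query pool collects, the secondary must not have started a query of a type
// already active in the primary, and both buffers must come from pools created on the
// same queue family.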
static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCB->commandBuffer),
                                    __LINE__, VALIDATION_ERROR_02065, "DS",
                                    "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                    "which has invalid active query pool 0x%" PRIx64
                                    ". Pipeline statistics are being queried so the command "
                                    "buffer must have all bits set on the queryPool. %s",
                                    pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
                                    validation_error_map[VALIDATION_ERROR_02065]);
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64
                        " of type %d but a query of that type has been started on "
                        "secondary Cmd Buffer 0x%p.",
                        pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
        }
    }

    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
    }

    return skip;
}

9639VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
9640                                              const VkCommandBuffer *pCommandBuffers) {
9641    bool skip = false;
9642    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9643    std::unique_lock<std::mutex> lock(global_lock);
9644    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9645    if (pCB) {
9646        GLOBAL_CB_NODE *pSubCB = NULL;
9647        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9648            pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
9649            assert(pSubCB);
9650            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9651                skip |=
9652                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9653                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_00156, "DS",
9654                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9655                            "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
9656                            pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00156]);
9657            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9658                auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9659                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9660                    skip |= log_msg(
9661                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9662                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_02057, "DS",
9663                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
9664                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
9665                        pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
9666                        validation_error_map[VALIDATION_ERROR_02057]);
9667                } else {
9668                    // Make sure render pass is compatible with parent command buffer pass if has continue
9669                    if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
9670                        skip |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
9671                                                                pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
9672                    }
9673                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
9674                    skip |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9675                }
9676                string errorString = "";
9677                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
9678                if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
9679                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
9680                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
9681                    skip |= log_msg(
9682                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9683                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9684                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
9685                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
9686                        pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
9687                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
9688                }
9689            }
9690            // TODO(mlentine): Move more logic into this method
9691            skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9692            skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0);
9693            // Secondary cmdBuffers are considered pending execution starting
9694            // from the time they are recorded
9695            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9696                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9697                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9698                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCB->commandBuffer),
9699                                    __LINE__, VALIDATION_ERROR_00154, "DS",
9700                                    "Attempt to simultaneously execute command buffer 0x%p"
9701                                    " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
9702                                    pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
9703                }
9704                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9705                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9706                    skip |= log_msg(
9707                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9708                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9709                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
9710                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9711                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9712                        "set, even though it does.",
9713                        pCommandBuffers[i], pCB->commandBuffer);
9714                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9715                }
9716            }
9717            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
9718                skip |=
9719                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9720                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_02062, "DS",
9721                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9722                            "(0x%p) cannot be submitted with a query in "
9723                            "flight and inherited queries not "
9724                            "supported on this device. %s",
9725                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
9726            }
9727            // Propagate layout transitions to the primary cmd buffer
9728            for (auto ilm_entry : pSubCB->imageLayoutMap) {
9729                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
9730            }
9731            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9732            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9733            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9734            for (auto &function : pSubCB->queryUpdates) {
9735                pCB->queryUpdates.push_back(function);
9736            }
9737        }
9738        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_00163);
9739        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
9740                                      VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_00162);
9741        skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9742        UpdateCmdBufferLastCmd(pCB, CMD_EXECUTECOMMANDS);
9743    }
9744    lock.unlock();
9745    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9746}
9747
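// Illustrative sketch (not part of the layer) of recording a secondary command buffer that
// satisfies the checks above; 'device', 'secondary_pool', 'render_pass', and 'framebuffer'
// are assumed application-side handles, and 'secondary_pool' must come from the same queue
// family as the primary command buffer's pool:
//
//     VkCommandBufferAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO};
//     alloc_info.commandPool = secondary_pool;
//     alloc_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;  // primaries in pCommandBuffers are rejected
//     alloc_info.commandBufferCount = 1;
//     VkCommandBuffer secondary_cb;
//     vkAllocateCommandBuffers(device, &alloc_info, &secondary_cb);
//
//     VkCommandBufferInheritanceInfo inheritance = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inheritance.renderPass = render_pass;   // must be compatible with the primary's active render pass
//     inheritance.framebuffer = framebuffer;  // if not VK_NULL_HANDLE, must match the primary's framebuffer
//
//     VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;  // required inside a render pass
//     begin_info.pInheritanceInfo = &inheritance;
//     vkBeginCommandBuffer(secondary_cb, &begin_info);
//     // ... record draw commands, vkEndCommandBuffer(), then execute from the primary:
//     // vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);
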
9748VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
9749                                         void **ppData) {
9750    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9751
9752    bool skip = false;
9753    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9754    std::unique_lock<std::mutex> lock(global_lock);
9755    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
9756    if (mem_info) {
9757        // TODO : This could be more fine-grained to track just the region that is valid
9758        mem_info->global_valid = true;
9759        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
9760        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
9761        // TODO : Do we need to create new "bound_range" for the mapped range?
9762        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
9763        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
9764             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9765            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9766                           (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM",
9767                           "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
9768                           (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
9769        }
9770    }
9771    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
9772    lock.unlock();
9773
9774    if (!skip) {
9775        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
9776        if (VK_SUCCESS == result) {
9777            lock.lock();
9778            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
9779            storeMemRanges(dev_data, mem, offset, size);
9780            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
9781            lock.unlock();
9782        }
9783    }
9784    return result;
9785}
9786
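// Illustrative sketch of the mapping contract validated above (application-side; 'device',
// 'memory' allocated from a VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT memory type, 'src', and
// 'src_size' are assumed):
//
//     void *data = nullptr;
//     VkResult err = vkMapMemory(device, memory, 0 /*offset*/, VK_WHOLE_SIZE, 0 /*flags*/, &data);
//     if (err == VK_SUCCESS) {
//         memcpy(data, src, src_size);    // CPU writes through the returned pointer
//         vkUnmapMemory(device, memory);  // deleteMemRanges() below tracks the unmap
//     }
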
9787VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9788    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9789    bool skip = false;
9790
9791    std::unique_lock<std::mutex> lock(global_lock);
9792    skip |= deleteMemRanges(dev_data, mem);
9793    lock.unlock();
9794    if (!skip) {
9795        dev_data->dispatch_table.UnmapMemory(device, mem);
9796    }
9797}
9798
9799static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
9800                                   const VkMappedMemoryRange *pMemRanges) {
9801    bool skip = false;
9802    for (uint32_t i = 0; i < memRangeCount; ++i) {
9803        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
9804        if (mem_info) {
9805            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
9806                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
9807                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9808                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9809                                    VALIDATION_ERROR_00643, "MEM", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
9810                                                                   ") is less than Memory Object's offset "
9811                                                                   "(" PRINTF_SIZE_T_SPECIFIER "). %s",
9812                                    funcName, static_cast<size_t>(pMemRanges[i].offset),
9813                                    static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
9814                }
9815            } else {
9816                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
9817                                              ? mem_info->alloc_info.allocationSize
9818                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
9819                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
9820                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
9821                    skip |=
9822                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9823                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
9824                                "%s: Flush/Invalidate range end (" PRINTF_SIZE_T_SPECIFIER "), from offset (" PRINTF_SIZE_T_SPECIFIER
9825                                "), exceeds the Memory Object's upper-bound "
9826                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
9827                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9828                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
9829                                validation_error_map[VALIDATION_ERROR_00642]);
9830                }
9831            }
9832        }
9833    }
9834    return skip;
9835}
9836
9837static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
9838                                                     const VkMappedMemoryRange *mem_ranges) {
9839    bool skip = false;
9840    for (uint32_t i = 0; i < mem_range_count; ++i) {
9841        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9842        if (mem_info) {
9843            if (mem_info->shadow_copy) {
9844                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9845                                        ? mem_info->mem_range.size
9846                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9847                char *data = static_cast<char *>(mem_info->shadow_copy);
9848                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
9849                    if (data[j] != NoncoherentMemoryFillValue) {
9850                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9851                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
9852                                        MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
9853                                        (uint64_t)mem_ranges[i].memory);
9854                    }
9855                }
9856                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
9857                    if (data[j] != NoncoherentMemoryFillValue) {
9858                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9859                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
9860                                        MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9861                                        (uint64_t)mem_ranges[i].memory);
9862                    }
9863                }
9864                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
9865            }
9866        }
9867    }
9868    return skip;
9869}
9870
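// The over/underflow checks above rely on the shadow-copy layout this layer allocates for
// non-coherent mappings; a sketch of that layout ('pad' being mem_info->shadow_pad_size):
//
//     [ pad fill bytes | size bytes of user data | pad fill bytes ]
//       underflow canary  copied to p_driver_data  overflow canary
//
// Any canary byte that no longer equals NoncoherentMemoryFillValue indicates the app wrote
// outside the mapped range.
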
9871static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
9872    for (uint32_t i = 0; i < mem_range_count; ++i) {
9873        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9874        if (mem_info && mem_info->shadow_copy) {
9875            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9876                                    ? mem_info->mem_range.size
9877                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
9878            char *data = static_cast<char *>(mem_info->shadow_copy);
9879            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
9880        }
9881    }
9882}
9883
9884static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
9885                                                  const VkMappedMemoryRange *mem_ranges) {
9886    bool skip = false;
9887    for (uint32_t i = 0; i < mem_range_count; ++i) {
9888        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
9889        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
9890            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9891                            reinterpret_cast<const uint64_t &>(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_00644, "MEM",
9892                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
9893                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
9894                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
9895        }
9896        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
9897            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9898                            reinterpret_cast<const uint64_t &>(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_00645, "MEM",
9899                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
9900                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
9901                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
9902        }
9903    }
9904    return skip;
9905}
9906
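// Sketch of building a VkMappedMemoryRange that satisfies the atom-size checks above
// (application-side; 'memory', 'byte_offset', 'byte_size', and 'atom_size' taken from
// VkPhysicalDeviceLimits::nonCoherentAtomSize are assumed; division is used because the
// spec does not guarantee atom_size is a power of two):
//
//     VkDeviceSize aligned_offset = (byte_offset / atom_size) * atom_size;  // round down
//     VkDeviceSize aligned_end = ((byte_offset + byte_size + atom_size - 1) / atom_size) * atom_size;
//     VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//     range.memory = memory;
//     range.offset = aligned_offset;
//     range.size = aligned_end - aligned_offset;  // or VK_WHOLE_SIZE, which the size check exempts
//     // (clamp aligned_end to the allocation size if the rounded range would overrun it)
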
9907static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9908                                                   const VkMappedMemoryRange *mem_ranges) {
9909    bool skip = false;
9910    std::lock_guard<std::mutex> lock(global_lock);
9911    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
9912    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
9913    return skip;
9914}
9915
9916VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9917                                                       const VkMappedMemoryRange *pMemRanges) {
9918    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9919    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9920
9921    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9922        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9923    }
9924    return result;
9925}
9926
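// Sketch of the flush direction handled here (application-side; 'device', a mapped pointer
// 'data', and an aligned 'range' as in the sketch above are assumed):
//
//     memcpy(data, src, src_size);                   // CPU write to non-coherent mapped memory
//     vkFlushMappedMemoryRanges(device, 1, &range);  // make the write visible to the device
//
// The inverse direction, vkInvalidateMappedMemoryRanges() before the CPU reads device
// writes, is handled below.
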
9927static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9928                                                        const VkMappedMemoryRange *mem_ranges) {
9929    bool skip = false;
9930    std::lock_guard<std::mutex> lock(global_lock);
9931    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
9932    return skip;
9933}
9934
9935static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9936                                                       const VkMappedMemoryRange *mem_ranges) {
9937    std::lock_guard<std::mutex> lock(global_lock);
9938    // Update our shadow copy with modified driver data
9939    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
9940}
9941
9942VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9943                                                            const VkMappedMemoryRange *pMemRanges) {
9944    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9945    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9946
9947    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9948        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9949        if (result == VK_SUCCESS) {
9950            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
9951        }
9952    }
9953    return result;
9954}
9955
9956static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9957                                           VkDeviceSize memoryOffset) {
9958    bool skip = false;
9959    if (image_state) {
9960        std::unique_lock<std::mutex> lock(global_lock);
9961        // Track objects tied to memory
9962        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9963        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
9964        if (!image_state->memory_requirements_checked) {
9965            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
9966            // BindImageMemory, but it's implied: the memory being bound must conform to the VkMemoryRequirements from
9967            // vkGetImageMemoryRequirements()
9968            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9969                            image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
9970                            "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
9971                            " but vkGetImageMemoryRequirements() has not been called on that image.",
9972                            image_handle);
9973            // Make the call for them so we can verify the state
9974            lock.unlock();
9975            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
9976            lock.lock();
9977        }
9978
9979        // Validate bound memory range information
9980        auto mem_info = GetMemObjInfo(dev_data, mem);
9981        if (mem_info) {
9982            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9983                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, "vkBindImageMemory()");
9984            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
9985                                        VALIDATION_ERROR_00806);
9986        }
9987
9988        // Validate memory requirements alignment
9989        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
9990            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9991                            image_handle, __LINE__, VALIDATION_ERROR_02178, "DS",
9992                            "vkBindImageMemory(): memoryOffset is 0x%" PRIxLEAST64
9993                            " but must be an integer multiple of the "
9994                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
9995                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
9996                            memoryOffset, image_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02178]);
9997        }
9998
9999        // Validate memory requirements size
10000        if (mem_info && (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset)) {
10001            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10002                            image_handle, __LINE__, VALIDATION_ERROR_02179, "DS",
10003                            "vkBindImageMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
10004                            " but must be at least as large as "
10005                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
10006                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
10007                            mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
10008                            validation_error_map[VALIDATION_ERROR_02179]);
10009        }
10010    }
10011    return skip;
10012}
10013
10014static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
10015                                          VkDeviceSize memoryOffset) {
10016    if (image_state) {
10017        std::unique_lock<std::mutex> lock(global_lock);
10018        // Track bound memory range information
10019        auto mem_info = GetMemObjInfo(dev_data, mem);
10020        if (mem_info) {
10021            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
10022                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10023        }
10024
10025        // Track objects tied to memory
10026        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10027        SetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
10028
10029        image_state->binding.mem = mem;
10030        image_state->binding.offset = memoryOffset;
10031        image_state->binding.size = image_state->requirements.size;
10032    }
10033}
10034
10035VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10036    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10037    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10038    auto image_state = GetImageState(dev_data, image);
10039    bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
10040    if (!skip) {
10041        result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
10042        if (result == VK_SUCCESS) {
10043            PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
10044        }
10045    }
10046    return result;
10047}
10048
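// Sketch of the bind sequence the checks above expect (application-side; 'device', 'image',
// and a 'memory' allocation from a type allowed by reqs.memoryTypeBits are assumed):
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);  // avoids the warning issued above
//     VkDeviceSize memory_offset = 0;  // must be a multiple of reqs.alignment, and the
//                                      // allocation must have >= reqs.size bytes past it
//     vkBindImageMemory(device, image, memory, memory_offset);
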
10049VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10050    bool skip = false;
10051    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10052    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10053    std::unique_lock<std::mutex> lock(global_lock);
10054    auto event_state = GetEventNode(dev_data, event);
10055    if (event_state) {
10056        event_state->needsSignaled = false;
10057        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10058        if (event_state->write_in_use) {
10059            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10060                            reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10061                            "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
10062                            reinterpret_cast<const uint64_t &>(event));
10063        }
10064    }
10065    lock.unlock();
10066    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10067    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10068    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10069    for (auto queue_data : dev_data->queueMap) {
10070        auto event_entry = queue_data.second.eventToStageMap.find(event);
10071        if (event_entry != queue_data.second.eventToStageMap.end()) {
10072            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10073        }
10074    }
10075    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
10076    return result;
10077}
10078
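// Sketch of the host-signal pattern tracked above (application-side; 'device', 'event', and
// a command buffer 'cmd_buf' being recorded are assumed):
//
//     // The wait must name VK_PIPELINE_STAGE_HOST_BIT as the source stage, since the event
//     // will be signaled from the host rather than by vkCmdSetEvent():
//     vkCmdWaitEvents(cmd_buf, 1, &event, VK_PIPELINE_STAGE_HOST_BIT,
//                     VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
//     // ... vkEndCommandBuffer(), vkQueueSubmit(), then later from the host:
//     vkSetEvent(device, event);
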
10079VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10080                                               VkFence fence) {
10081    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10082    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10083    bool skip = false;
10084    std::unique_lock<std::mutex> lock(global_lock);
10085    auto pFence = GetFenceNode(dev_data, fence);
10086    auto pQueue = GetQueueState(dev_data, queue);
10087
10088    // First verify that fence is not in use
10089    skip |= ValidateFenceForSubmit(dev_data, pFence);
10090
10091    if (pFence) {
10092        SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
10093    }
10094
10095    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10096        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10097        // Track objects tied to memory
10098        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10099            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10100                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
10101                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10102                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, kVulkanObjectTypeBuffer))
10103                    skip = true;
10104            }
10105        }
10106        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10107            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10108                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
10109                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10110                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, kVulkanObjectTypeImage))
10111                    skip = true;
10112            }
10113        }
10114        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10115            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10116                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
10117                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
10118                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
10119                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
10120                                        (uint64_t)bindInfo.pImageBinds[j].image, kVulkanObjectTypeImage))
10121                    skip = true;
10122            }
10123        }
10124
10125        std::vector<SEMAPHORE_WAIT> semaphore_waits;
10126        std::vector<VkSemaphore> semaphore_signals;
10127        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10128            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10129            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10130            if (pSemaphore) {
10131                if (pSemaphore->signaled) {
10132                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
10133                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
10134                        pSemaphore->in_use.fetch_add(1);
10135                    }
10136                    pSemaphore->signaler.first = VK_NULL_HANDLE;
10137                    pSemaphore->signaled = false;
10138                } else {
10139                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10140                                    reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10141                                    "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64
10142                                    " that has no way to be signaled.",
10143                                    queue, reinterpret_cast<const uint64_t &>(semaphore));
10144                }
10145            }
10146        }
10147        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10148            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10149            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10150            if (pSemaphore) {
10151                if (pSemaphore->signaled) {
10152                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10153                                   reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10154                                   "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
10155                                   ", but that semaphore is already signaled.",
10156                                   queue, reinterpret_cast<const uint64_t &>(semaphore));
10157                } else {
10158                    pSemaphore->signaler.first = queue;
10159                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
10160                    pSemaphore->signaled = true;
10161                    pSemaphore->in_use.fetch_add(1);
10162                    semaphore_signals.push_back(semaphore);
10163                }
10164            }
10165        }
10166
10167        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
10168                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
10169    }
10170
10171    if (pFence && !bindInfoCount) {
10172        // No work to do, just dropping a fence in the queue by itself.
10173        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
10174                                         fence);
10175    }
10176
10177    lock.unlock();
10178
10179    if (!skip) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10180
10181    return result;
10182}
10183
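// Sketch of a minimal opaque image bind matching the bookkeeping above (application-side;
// 'queue', 'sparse_image', 'memory', 'bind_size', and 'fence' are assumed):
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = bind_size;   // typically a multiple of the image's sparse block size
//     bind.memory = memory;    // VK_NULL_HANDLE here would unbind the range instead
//     bind.memoryOffset = 0;
//     VkSparseImageOpaqueMemoryBindInfo opaque_bind = {sparse_image, 1, &bind};
//     VkBindSparseInfo bind_info = {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO};
//     bind_info.imageOpaqueBindCount = 1;
//     bind_info.pImageOpaqueBinds = &opaque_bind;
//     vkQueueBindSparse(queue, 1, &bind_info, fence);  // fence may be VK_NULL_HANDLE
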
10184VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10185                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10186    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10187    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10188    if (result == VK_SUCCESS) {
10189        std::lock_guard<std::mutex> lock(global_lock);
10190        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
10191        sNode->signaler.first = VK_NULL_HANDLE;
10192        sNode->signaler.second = 0;
10193        sNode->signaled = false;
10194    }
10195    return result;
10196}
10197
10198VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
10199                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10200    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10201    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10202    if (result == VK_SUCCESS) {
10203        std::lock_guard<std::mutex> lock(global_lock);
10204        dev_data->eventMap[*pEvent].needsSignaled = false;
10205        dev_data->eventMap[*pEvent].write_in_use = 0;
10206        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10207    }
10208    return result;
10209}
10210
10211static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
10212                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
10213                                              SWAPCHAIN_NODE *old_swapchain_state) {
10214    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
10215
10216    // TODO: revisit this. some of these rules are being relaxed.
10217    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
10218        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10219                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
10220                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
10221            return true;
10222    }
10223    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
10224        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10225                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
10226                    "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
10227            return true;
10228    }
10229    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
10230    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
10231        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10232                    reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
10233                    "%s: surface capabilities not retrieved for this physical device", func_name))
10234            return true;
10235    } else {  // have valid capabilities
10236        auto &capabilities = physical_device_state->surfaceCapabilities;
10237        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
10238        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
10239            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10240                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
10241                        "%s called with minImageCount = %d, which is outside the bounds returned "
10242                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
10243                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
10244                        validation_error_map[VALIDATION_ERROR_02331]))
10245                return true;
10246        }
10247
10248        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
10249            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10250                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
10251                        "%s called with minImageCount = %d, which is outside the bounds returned "
10252                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
10253                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
10254                        validation_error_map[VALIDATION_ERROR_02332]))
10255                return true;
10256        }
10257
10258        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
10259        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
10260            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
10261             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
10262             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
10263             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
10264            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10265                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
10266                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
10267                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
10268                        "maxImageExtent = (%d,%d). %s",
10269                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
10270                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
10271                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
10272                        validation_error_map[VALIDATION_ERROR_02334]))
10273                return true;
10274        }
10275        if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
10276            ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
10277             (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
10278            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10279                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
10280                        "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
10281                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
10282                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
10283                        capabilities.currentExtent.width, capabilities.currentExtent.height,
10284                        validation_error_map[VALIDATION_ERROR_02334]))
10285                return true;
10286        }
10287        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
10288        // VkSurfaceCapabilitiesKHR::supportedTransforms.
10289        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
10290            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
10291            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
10292            // it up a little at a time, and then log it:
10293            std::string errorString = "";
10294            char str[1024];
10295            // Here's the first part of the message:
10296            snprintf(str, sizeof(str), "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n",
10297                     func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
10298            errorString += str;
10299            for (int i = 0; i < 32; i++) {
10300                // Build up the rest of the message:
10301                if ((1u << i) & capabilities.supportedTransforms) {
10302                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
10303                    snprintf(str, sizeof(str), "    %s\n", newStr);
10304                    errorString += str;
10305                }
10306            }
10307            // Log the message that we've built up:
10308            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10309                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
10310                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
10311                return true;
10312        }
10313
10314        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
10315        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
10316        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
10317            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
10318            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
10319            // it up a little at a time, and then log it:
10320            std::string errorString = "";
10321            char str[1024];
10322            // Here's the first part of the message:
10323            snprintf(str, sizeof(str), "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
10324                     func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
10325            errorString += str;
10326            for (int i = 0; i < 32; i++) {
10327                // Build up the rest of the message:
10328                if ((1u << i) & capabilities.supportedCompositeAlpha) {
10329                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
10330                    snprintf(str, sizeof(str), "    %s\n", newStr);
10331                    errorString += str;
10332                }
10333            }
10334            // Log the message that we've built up:
10335            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10336                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
10337                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
10338                return true;
10339        }
10340        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
10341        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
10342            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10343                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
10344                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
10345                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
10346                        validation_error_map[VALIDATION_ERROR_02335]))
10347                return true;
10348        }
10349        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
10350        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
10351            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10352                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
10353                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
10354                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
10355                        validation_error_map[VALIDATION_ERROR_02336]))
10356                return true;
10357        }
10358    }
10359
10360    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
10361    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
10362        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10363                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
10364                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
10365            return true;
10366    } else {
10367        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
10368        bool foundFormat = false;
10369        bool foundColorSpace = false;
10370        bool foundMatch = false;
10371        for (auto const &format : physical_device_state->surface_formats) {
10372            if (pCreateInfo->imageFormat == format.format) {
10373                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
10374                foundFormat = true;
10375                if (pCreateInfo->imageColorSpace == format.colorSpace) {
10376                    foundMatch = true;
10377                    break;
10378                }
10379            } else {
10380                if (pCreateInfo->imageColorSpace == format.colorSpace) {
10381                    foundColorSpace = true;
10382                }
10383            }
10384        }
10385        if (!foundMatch) {
10386            if (!foundFormat) {
10387                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10388                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
10389                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
10390                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
10391                    return true;
10392            }
10393            if (!foundColorSpace) {
10394                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10395                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
10396                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
10397                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
10398                    return true;
10399            }
10400        }
10401    }
10402
10403    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
10404    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
10405        // FIFO is required to always be supported
10406        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
10407            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10408                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
10409                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
10410                return true;
10411        }
10412    } else {
10413        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
10414        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
10415                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
10416        if (!foundMatch) {
10417            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10418                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
10419                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
10420                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
10421                return true;
10422        }
10423    }
10424
10425    return false;
10426}
10427
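// Sketch of the query sequence the validation above requires before vkCreateSwapchainKHR()
// (application-side; 'physical_device' and 'surface' are assumed):
//
//     VkSurfaceCapabilitiesKHR caps;
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &caps);
//     uint32_t format_count = 0;
//     vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &format_count, nullptr);
//     std::vector<VkSurfaceFormatKHR> formats(format_count);
//     vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &format_count, formats.data());
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &mode_count, modes.data());
//     // minImageCount, imageExtent, preTransform, compositeAlpha, imageFormat/imageColorSpace,
//     // and presentMode in VkSwapchainCreateInfoKHR are then chosen from these results.
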
10428static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
10429                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
10430                                             SWAPCHAIN_NODE *old_swapchain_state) {
10431    if (VK_SUCCESS == result) {
10432        std::lock_guard<std::mutex> lock(global_lock);
10433        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
10434        surface_state->swapchain = swapchain_state.get();
10435        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
10436    } else {
10437        surface_state->swapchain = nullptr;
10438    }
10439    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
10440    if (old_swapchain_state) {
10441        old_swapchain_state->replaced = true;
10442    }
10443    surface_state->old_swapchain = old_swapchain_state;
10444    return;
10445}
10446
10447VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10448                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
10449    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10450    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
10451    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
10452
10453    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
10454        return VK_ERROR_VALIDATION_FAILED_EXT;
10455    }
10456
10457    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10458
10459    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
10460
10461    return result;
10462}

VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                // Swapchain images are VkImage handles, so clear their bindings with the image object type,
                // and accumulate (rather than overwrite) the skip result across images.
                skip |= ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, kVulkanObjectTypeImage);
                dev_data->imageMap.erase(swapchain_image);
            }
        }

        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
        if (surface_state) {
            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
        }

        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount) return result;
        std::lock_guard<std::mutex> lock(global_lock);
        const size_t count = *pCount;
        auto swapchain_node = GetSwapchainNode(dev_data, swapchain);
        if (swapchain_node && !swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching data", (uint64_t)(swapchain));
            }
        }
        // The per-image bookkeeping below dereferences swapchain_node; skip it for an untracked swapchain handle.
        if (!swapchain_node) return result;
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            // Add imageMap entries for each swapchain image
            VkImageCreateInfo image_ci = {};
            image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
            image_ci.flags = 0;
            image_ci.imageType = VK_IMAGE_TYPE_2D;
            image_ci.format = swapchain_node->createInfo.imageFormat;
            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
            image_ci.extent.depth = 1;
            image_ci.mipLevels = 1;
            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
            image_ci.usage = swapchain_node->createInfo.imageUsage;
            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
            auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
            image_state->valid = false;
            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    bool skip = false;

    std::lock_guard<std::mutex> lock(global_lock);
    auto queue_state = GetQueueState(dev_data, queue);

    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
                            reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
                    "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
                    pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(dev_data, image);
                skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                if (!image_state->acquired) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                        DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
                }

                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s. %s",
                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able
            // to present to any native window on Android; require the
            // application to have established support on any other platform.
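            //
            // Hedged app-side sketch (hypothetical names): establishing that support
            // once per (physical device, queue family, surface) triple:
            //     VkBool32 presentable = VK_FALSE;
            //     vkGetPhysicalDeviceSurfaceSupportKHR(gpu, queue_family_index, surface, &presentable);
            //     // Present from this family only when 'presentable' is VK_TRUE.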
            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
                                "vkQueuePresentKHR: Presenting image without calling "
                                "vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961, "DS",
                        "vkQueuePresentKHR: Presenting image on queue that cannot "
                        "present to this surface. %s",
                        validation_error_map[VALIDATION_ERROR_01961]);
                }
            }
        }
    }
    if (pPresentInfo && pPresentInfo->pNext) {
        // Verify ext struct
        struct std_header {
            VkStructureType sType;
            const void *pNext;
        };
        std_header *pnext = (std_header *)pPresentInfo->pNext;
        while (pnext) {
            if (VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR == pnext->sType) {
                VkPresentRegionsKHR *present_regions = (VkPresentRegionsKHR *)pnext;
                for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                    auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
                    assert(swapchain_data);
                    VkPresentRegionKHR region = present_regions->pRegions[i];
                    for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                        VkRectLayerKHR rect = region.pRectangles[j];
                        // TODO: Need to update these errors to their unique error ids when available
                        if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                            reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                            DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
                                            "chain, pRegion[%i].pRectangles[%i], the sum of offset.x "
                                            "(%i) and extent.width (%i) is greater than the "
                                            "corresponding swapchain's imageExtent.width (%i).",
                                            i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
                        }
                        if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                            reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                            DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
                                            "chain, pRegion[%i].pRectangles[%i], the sum of offset.y "
                                            "(%i) and extent.height (%i) is greater than the "
                                            "corresponding swapchain's imageExtent.height (%i).",
                                            i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
                        }
                        // VkRectLayerKHR::layer must be less than the swapchain's imageArrayLayers.
                        if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
                            skip |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the "
                                "layer (%i) is not less than the corresponding swapchain's imageArrayLayers (%i).",
                                i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                        }
                    }
                }
            } else if (VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE == pnext->sType) {
                VkPresentTimesInfoGOOGLE *present_times_info = (VkPresentTimesInfoGOOGLE *)pnext;
                if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[0]), __LINE__,
                                VALIDATION_ERROR_03214, "DS",
                                "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %u but "
                                "pPresentInfo->swapchainCount is %u. For VkPresentTimesInfoGOOGLE down pNext "
                                "chain of VkPresentInfoKHR, VkPresentTimesInfoGOOGLE.swapchainCount "
                                "must equal VkPresentInfoKHR.swapchainCount.",
                                present_times_info->swapchainCount, pPresentInfo->swapchainCount);
                }
            }
            pnext = (std_header *)pnext->pNext;
        }
    }
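
    // Hedged app-side sketch (hypothetical names) of the extension chain walked
    // above; std_header mirrors the sType/pNext prefix every chained struct shares:
    //     VkPresentRegionsKHR regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
    //     regions.swapchainCount = present_info.swapchainCount;
    //     regions.pRegions = per_swapchain_regions;  // one entry per swapchain
    //     present_info.pNext = &regions;             // chained into VkPresentInfoKHR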

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            // Note: this is imperfect, in that we can get confused about what
            // did or didn't succeed-- but if the app does that, it's confused
            // itself just as much.
            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;

            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.

            // Mark the image as having been released to the WSI
            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            auto image_state = GetImageState(dev_data, image);
            image_state->acquired = false;
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                     std::vector<SURFACE_STATE *> &surface_state,
                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
            std::stringstream func_name;
            // Identify the pCreateInfos entry being validated, not the total count.
            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
                                                  old_swapchain_state[i])) {
                return true;
            }
        }
    }
    return false;
}

static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                    std::vector<SURFACE_STATE *> &surface_state,
                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (VK_SUCCESS == result) {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
            surface_state[i]->swapchain = swapchain_state.get();
            dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
        }
    } else {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state[i]->swapchain = nullptr;
        }
    }
    // Spec requires that even if CreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
    for (uint32_t i = 0; i < swapchainCount; i++) {
        if (old_swapchain_state[i]) {
            old_swapchain_state[i]->replaced = true;
        }
        surface_state[i]->old_swapchain = old_swapchain_state[i];
    }
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::vector<SURFACE_STATE *> surface_state;
    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;

    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                                 old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);

    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                            old_swapchain_state);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);

    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
                        "to determine the completion of this operation.");
    }
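
    // Hedged app-side sketch (hypothetical names): acquiring with a semaphore so the
    // image's first use can wait on it:
    //     uint32_t image_index = 0;
    //     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore,
    //                           VK_NULL_HANDLE, &image_index);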

    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->signaled) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
                        validation_error_map[VALIDATION_ERROR_01952]);
    }

    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence) {
        skip |= ValidateFenceForSubmit(dev_data, pFence);
    }

    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    // Valid usage requires a valid swapchain handle; the node is dereferenced below.
    assert(swapchain_data);

    if (swapchain_data->replaced) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
                        "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
                        "present any images it has acquired, but cannot acquire any more.");
    }

    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                        acquired_images);
        }
    }
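
    // Worked example of the check above: with 3 swapchain images and a reported
    // minImageCount of 2, holding more than 3 - 2 = 1 un-presented acquired image
    // at a time triggers this error.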

    if (swapchain_data->images.size() == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
                        "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
                        "vkGetSwapchainImagesKHR after swapchain creation.");
    }
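
    // Hedged app-side sketch (hypothetical names) of the call order the warning
    // above expects:
    //     uint32_t image_count = 0;
    //     vkGetSwapchainImagesKHR(device, swapchain, &image_count, nullptr);
    //     std::vector<VkImage> images(image_count);
    //     vkGetSwapchainImagesKHR(device, swapchain, &image_count, images.data());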

    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);

    lock.lock();
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        if (pFence) {
            pFence->state = FENCE_INFLIGHT;
            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
        }

        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
        if (pSemaphore) {
            pSemaphore->signaled = true;
            pSemaphore->signaler.first = VK_NULL_HANDLE;
        }

        // Mark the image as acquired.
        auto image = swapchain_data->images[*pImageIndex];
        auto image_state = GetImageState(dev_data, image);
        image_state->acquired = true;
    }
    lock.unlock();

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    assert(instance_data);

    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
    if (NULL == pPhysicalDevices) {
        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
    } else {
        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
            // Flag warning here. You can call this without having queried the count, but it may not be
            // robust on platforms with multiple physical devices.
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                            0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                            "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
            // Having actual count match count from app is not a requirement, so this can be a warning
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                            "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
                            "supported by this instance is %u.",
                            *pPhysicalDeviceCount, instance_data->physical_devices_count);
        }
        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    if (NULL == pPhysicalDevices) {
        instance_data->physical_devices_count = *pPhysicalDeviceCount;
    } else if (result == VK_SUCCESS) {  // Save physical devices
        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
            phys_device_state.phys_device = pPhysicalDevices[i];
            // Init actual features for each physical device
            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
        }
    }
    return result;
}
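
// Hedged app-side sketch (not layer code; names are hypothetical) of the
// count-then-query idiom the warnings above steer applications toward:
//
//     uint32_t gpu_count = 0;
//     vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);
//     std::vector<VkPhysicalDevice> gpus(gpu_count);
//     vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data());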

// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
                                                                 PHYSICAL_DEVICE_STATE *pd_state,
                                                                 uint32_t *pQueueFamilyPropertyCount, bool qfp_null,
                                                                 const char *count_var_name, const char *caller_name) {
    bool skip = false;
    if (qfp_null) {
        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
    } else {
        // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to get
        // count
        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                            "Call sequence has %s() w/ non-NULL "
                            "pQueueFamilyProperties. You should first call %s() w/ "
                            "NULL pQueueFamilyProperties to query pCount.",
                            caller_name, caller_name);
        }
        // Then verify that pCount that is passed in on second call matches what was returned
        if (pd_state->queueFamilyPropertiesCount != *pQueueFamilyPropertyCount) {
            // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
            // provide as warning
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                            "Call to %s() w/ %s value %u, but actual count supported by this physicalDevice is %u.", caller_name,
                            count_var_name, *pQueueFamilyPropertyCount, pd_state->queueFamilyPropertiesCount);
        }
        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
    }
    return skip;
}

static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
                                                                  PHYSICAL_DEVICE_STATE *pd_state, uint32_t *pCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, pCount,
                                                                (nullptr == pQueueFamilyProperties), "pCount",
                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
}

static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
                                                                      PHYSICAL_DEVICE_STATE *pd_state,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, pQueueFamilyPropertyCount,
                                                                (nullptr == pQueueFamilyProperties), "pQueueFamilyPropertyCount",
                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
}

// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    if (!pQueueFamilyProperties) {
        pd_state->queueFamilyPropertiesCount = count;
    } else {  // Save queue family properties
        if (pd_state->queue_family_properties.size() < count) pd_state->queue_family_properties.resize(count);
        for (uint32_t i = 0; i < count; i++) {
            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}
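// Design note: results from the non-KHR entry point are wrapped in
// VkQueueFamilyProperties2KHR below so both flavors share the single
// state-update helper above.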
static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
    VkQueueFamilyProperties2KHR *pqfp = nullptr;
    std::vector<VkQueueFamilyProperties2KHR> qfp;
    qfp.resize(count);
    if (pQueueFamilyProperties) {
        for (uint32_t i = 0; i < count; ++i) {
            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
            qfp[i].pNext = nullptr;
            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
        }
        pqfp = qfp.data();
    }
    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
}

static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    bool skip =
        PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, pCount, pQueueFamilyProperties);
    if (skip) {
        return;
    }
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pCount, pQueueFamilyProperties);
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
    if (skip) {
        return;
    }
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
                                                                             pQueueFamilyProperties);
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
                                                             pQueueFamilyProperties);
}

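// All of the vkCreate*SurfaceKHR entry points below funnel through this helper;
// the platform-specific dispatch-table entry is supplied as a member-function
// pointer so surface state tracking lives in one place.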
template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
                              VkSurfaceKHR *pSurface, FPtr fptr) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    // Call down the call chain:
    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);

    if (result == VK_SUCCESS) {
        std::unique_lock<std::mutex> lock(global_lock);
        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = GetSurfaceState(instance_data, surface);

    if (surface_state) {
        // TODO: track swapchains created from this surface.
        instance_data->surface_map.erase(surface);
    }
    lock.unlock();

    if (!skip) {
        // Call down the call chain:
        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR

#ifdef VK_USE_PLATFORM_MIR_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_MIR_KHR

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_WAYLAND_KHR

#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}
#endif  // VK_USE_PLATFORM_WIN32_KHR

#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_XCB_KHR

#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_XLIB_KHR

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    lock.unlock();

    auto result =
        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);

    if (result == VK_SUCCESS) {
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = GetSurfaceState(instance_data, surface);
    lock.unlock();

    auto result =
        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);

    if (result == VK_SUCCESS) {
        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       uint32_t *pPresentModeCount,
                                                                       VkPresentModeKHR *pPresentModes) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;

    if (pPresentModes) {
        // Compare the preliminary value of *pPresentModeCount with the value this time:
        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
        switch (call_state) {
            case UNCALLED:
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior call with "
                    "NULL pPresentModes has been seen to query pPresentModeCount.");
                break;
            default:
                // both query count and query details
                if (*pPresentModeCount != prev_mode_count) {
                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, reinterpret_cast<uint64_t>(physicalDevice),
                                    __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
                                    "differs from the value (%u) that was returned when pPresentModes was NULL.",
                                    *pPresentModeCount, prev_mode_count);
                }
                break;
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
                                                                                        pPresentModes);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pPresentModeCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pPresentModeCount > physical_device_state->present_modes.size())
                physical_device_state->present_modes.resize(*pPresentModeCount);
        }
        if (pPresentModes) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
                physical_device_state->present_modes[i] = pPresentModes[i];
            }
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                  uint32_t *pSurfaceFormatCount,
                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;

    if (pSurfaceFormats) {
        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();

        switch (call_state) {
            case UNCALLED:
                // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely didn't
                // previously call this function with a NULL value of pSurfaceFormats:
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior call with "
                    "NULL pSurfaceFormats has been seen to query pSurfaceFormatCount.");
                break;
            default:
                if (prev_format_count != *pSurfaceFormatCount) {
                    skip |= log_msg(
                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, reinterpret_cast<uint64_t>(physicalDevice), __LINE__,
                        DEVLIMITS_COUNT_MISMATCH, "DL",
                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats and a *pSurfaceFormatCount "
                        "value (%u) that differs from the value (%u) that was returned when pSurfaceFormats was NULL.",
                        *pSurfaceFormatCount, prev_format_count);
                }
                break;
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    // Call down the call chain:
    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
                                                                                   pSurfaceFormats);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pSurfaceFormatCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
        }
        if (pSurfaceFormats) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugReportCallbackEXT *pMsgCallback) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}
11352
11353VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
11354    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
11355    bool skip = false;
11356    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11357
11358    if (instance_data) {
11359        // For this instance, track when EnumeratePhysicalDeviceGroupsKHX moves to QUERY_COUNT and then to QUERY_DETAILS.
11360        if (NULL == pPhysicalDeviceGroupProperties) {
11361            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
11362        } else {
11363            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
11364                // Flag warning here. You can call this without having queried the count, but it may not be
11365                // robust on platforms with multiple physical devices.
11366                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11367                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11368                                "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
11369                                "pPhysicalDeviceGroupProperties. You should first "
11370                                "call vkEnumeratePhysicalDeviceGroupsKHX() w/ NULL pPhysicalDeviceGroupProperties to query "
11371                                "pPhysicalDeviceGroupCount.");
11372            }  // TODO: Could also flag a warning if this function is re-called while in QUERY_DETAILS state
11373            else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
11374                // Having actual count match count from app is not a requirement, so this can be a warning
11375                skip |=
11376                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11377                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11378                            "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual count "
11379                            "supported by this instance is %u.",
11380                            *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
11381            }
11382            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
11383        }
11384        if (skip) {
11385            return VK_ERROR_VALIDATION_FAILED_EXT;
11386        }
11387        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
11388            pPhysicalDeviceGroupProperties);
11389        if (NULL == pPhysicalDeviceGroupProperties) {
11390            instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
11391        } else if (result == VK_SUCCESS) { // Save physical devices
11392            for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
11393                for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
11394                    VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
11395                    auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
11396                    phys_device_state.phys_device = cur_phys_dev;
11397                    // Init actual features for each physical device
11398                    instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
11399                }
11400            }
11401        }
11402        return result;
11403    } else {
11404        // instance_data is null in this branch; report_data is unavailable, so log to the console instead.
11405        LOGCONSOLE("Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDeviceGroupsKHX().",
11406                   (uint64_t)instance);
11407    }
11408    return VK_ERROR_VALIDATION_FAILED_EXT;
11409}
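
// The state tracking above assumes the standard Vulkan two-call enumeration idiom. A minimal sketch of the
// sequence the layer expects from an application (illustrative only; error handling omitted):
//
//     uint32_t group_count = 0;
//     vkEnumeratePhysicalDeviceGroupsKHX(instance, &group_count, nullptr);        // recorded as QUERY_COUNT
//     std::vector<VkPhysicalDeviceGroupPropertiesKHX> groups(group_count);
//     for (auto &group : groups) group.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHX;
//     vkEnumeratePhysicalDeviceGroupsKHX(instance, &group_count, groups.data());  // recorded as QUERY_DETAILS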
11410
11411VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
11412                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
11413                                                                 const VkAllocationCallbacks *pAllocator,
11414                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
11415    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11416    VkResult result =
11417        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
11418    if (VK_SUCCESS == result) {
11419        std::lock_guard<std::mutex> lock(global_lock);
11420        // Shadow template createInfo for later updates
11421        safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
11422            new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
11423        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
11424        dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
11425    }
11426    return result;
11427}
11428
11429VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
11430                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11431                                                              const VkAllocationCallbacks *pAllocator) {
11432    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11433    std::unique_lock<std::mutex> lock(global_lock);
11434    dev_data->desc_template_map.erase(descriptorUpdateTemplate);
11435    lock.unlock();
11436    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
11437}
11438
11439// PostCallRecord* handles recording state updates after the call proceeds down the chain to UpdateDescriptorSetWithTemplateKHR()
11440static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
11441                                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11442                                                             const void *pData) {
11443    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
11444    if (template_map_entry == device_data->desc_template_map.end()) {
11445        assert(0);
11446        return;  // Bail out rather than dereference an end() iterator when asserts are compiled out
11447    }
11448    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
11449}
11450
11451VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
11452                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11453                                                              const void *pData) {
11454    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11455    device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
11456
11457    PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
11458}
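
// For reference, a sketch of the application-side path that exercises the template create/update code above.
// Handles (set_layout, desc_set, buffer) are hypothetical, and only a single uniform-buffer binding is shown:
//
//     VkDescriptorUpdateTemplateEntryKHR entry = {};
//     entry.dstBinding = 0;
//     entry.descriptorCount = 1;
//     entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     entry.offset = 0;                                // byte offset of this descriptor's data within pData
//     entry.stride = sizeof(VkDescriptorBufferInfo);
//
//     VkDescriptorUpdateTemplateCreateInfoKHR ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
//     ci.descriptorUpdateEntryCount = 1;
//     ci.pDescriptorUpdateEntries = &entry;
//     ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
//     ci.descriptorSetLayout = set_layout;
//
//     VkDescriptorUpdateTemplateKHR tmpl;
//     vkCreateDescriptorUpdateTemplateKHR(device, &ci, nullptr, &tmpl);         // shadowed by the layer
//     VkDescriptorBufferInfo buf_info = {buffer, 0, VK_WHOLE_SIZE};
//     vkUpdateDescriptorSetWithTemplateKHR(device, desc_set, tmpl, &buf_info);  // state recorded above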
11459
11460VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
11461                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11462                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
11463    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11464    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
11465}
11466
11467static PFN_vkVoidFunction intercept_core_instance_command(const char *name);
11468
11469static PFN_vkVoidFunction intercept_core_device_command(const char *name);
11470
11471static PFN_vkVoidFunction intercept_device_extension_command(const char *name, VkDevice device);
11472
11473static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev);
11474
11475static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance);
11476
11477static PFN_vkVoidFunction intercept_extension_instance_commands(const char *name, VkInstance instance);
11479
11480VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
11481    assert(dev);
11482
11483    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
11484    if (!proc) proc = intercept_device_extension_command(funcName, dev);
11485    if (!proc) proc = intercept_khr_swapchain_command(funcName, dev);
11486    if (proc) return proc;
11487
11488    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(dev), layer_data_map);
11489    auto &table = dev_data->dispatch_table;
11490    if (!table.GetDeviceProcAddr) return nullptr;
11491    return table.GetDeviceProcAddr(dev, funcName);
11492}
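
// A sketch of how an application resolves a device entry point through this trampoline; "device" is assumed
// to be a valid VkDevice with VK_KHR_swapchain enabled, so the layer's interception table returns a hit:
//
//     auto fpCreateSwapchainKHR =
//         reinterpret_cast<PFN_vkCreateSwapchainKHR>(vkGetDeviceProcAddr(device, "vkCreateSwapchainKHR"));
//     if (fpCreateSwapchainKHR) { /* calls will be routed through the layer's CreateSwapchainKHR */ }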
11493
11494VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
11495    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
11496    if (!proc) proc = intercept_core_device_command(funcName);
11497    if (!proc) proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
11498    if (!proc) proc = intercept_khr_surface_command(funcName, instance);
11499    if (proc) return proc;
11500
11501    assert(instance);
11502
11503    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11504    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
11505    if (proc) return proc;
11506
11507    proc = intercept_extension_instance_commands(funcName, instance);
11508    if (proc) return proc;
11509
11510    auto &table = instance_data->dispatch_table;
11511    if (!table.GetInstanceProcAddr) return nullptr;
11512    return table.GetInstanceProcAddr(instance, funcName);
11513}
11514
11515VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
11516    assert(instance);
11517
11518    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11519
11520    auto &table = instance_data->dispatch_table;
11521    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
11522    return table.GetPhysicalDeviceProcAddr(instance, funcName);
11523}
11524
11525static PFN_vkVoidFunction intercept_core_instance_command(const char *name) {
11526    static const struct {
11527        const char *name;
11528        PFN_vkVoidFunction proc;
11529    } core_instance_commands[] = {
11530        {"vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr)},
11531        {"vk_layerGetPhysicalDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceProcAddr)},
11532        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
11533        {"vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance)},
11534        {"vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice)},
11535        {"vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices)},
11536        {"vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties)},
11537        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
11538        {"vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties)},
11539        {"vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties)},
11540        {"vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties)},
11541        {"vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties)},
11542    };
11543
11544    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
11545        if (!strcmp(core_instance_commands[i].name, name)) return core_instance_commands[i].proc;
11546    }
11547
11548    return nullptr;
11549}
11550
11551static PFN_vkVoidFunction intercept_core_device_command(const char *name) {
11552    static const struct {
11553        const char *name;
11554        PFN_vkVoidFunction proc;
11555    } core_device_commands[] = {
11556        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
11557        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
11558        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
11559        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
11560        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
11561        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
11562        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
11563        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
11564        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
11565        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
11566        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
11567        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
11568        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
11569        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
11570        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
11571        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
11572        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
11573        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
11574        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
11575        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
11576        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
11577        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
11578        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
11579        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
11580        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
11581        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
11582        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
11583        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
11584        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
11585        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
11586        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
11587        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
11588        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
11589        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
11590        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
11591        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
11592        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
11593        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
11594        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
11595        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
11596        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
11597        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
11598        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
11599        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
11600        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
11601        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
11602        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
11603        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
11604        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
11605        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
11606        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
11607        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
11608        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
11609        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
11610        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
11611        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
11612        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
11613        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
11614        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
11615        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
11616        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
11617        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
11618        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
11619        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
11620        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
11621        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
11622        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
11623        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
11624        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
11625        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
11626        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
11627        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
11628        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
11629        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
11630        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
11631        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
11632        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
11633        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
11634        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
11635        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
11636        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
11637        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
11638        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
11639        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
11640        {"vkGetImageSubresourceLayout", reinterpret_cast<PFN_vkVoidFunction>(GetImageSubresourceLayout) },
11641        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
11642        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
11643        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
11644        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
11645        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
11646        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
11647        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
11648        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
11649        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
11650        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
11651        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
11652        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
11653        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
11654        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
11655        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
11656        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
11657        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
11658        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
11659        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
11660        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
11661        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
11662        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
11663        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
11664        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
11665        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
11666        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
11667        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
11668        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
11669        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
11670        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
11671        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
11672        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
11673    };
11674
11675    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
11676        if (!strcmp(core_device_commands[i].name, name)) return core_device_commands[i].proc;
11677    }
11678
11679    return nullptr;
11680}
11681
11682static PFN_vkVoidFunction intercept_device_extension_command(const char *name, VkDevice device) {
11683    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11684    // Check the extension state up front: the table initializers below dereference device_data.
11685    if (!device_data || !device_data->device_extensions.khr_descriptor_update_template_enabled) return nullptr;
11686
11687    const struct {
11688        const char *name;
11689        PFN_vkVoidFunction proc;
11690        bool enabled;
11691    } device_extension_commands[] = {
11692        {"vkCreateDescriptorUpdateTemplateKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorUpdateTemplateKHR),
11693         device_data->device_extensions.khr_descriptor_update_template_enabled},
11694        {"vkDestroyDescriptorUpdateTemplateKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorUpdateTemplateKHR),
11695         device_data->device_extensions.khr_descriptor_update_template_enabled},
11696        {"vkUpdateDescriptorSetWithTemplateKHR", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSetWithTemplateKHR),
11697         device_data->device_extensions.khr_descriptor_update_template_enabled},
11698        {"vkCmdPushDescriptorSetWithTemplateKHR", reinterpret_cast<PFN_vkVoidFunction>(CmdPushDescriptorSetWithTemplateKHR),
11699         device_data->device_extensions.khr_descriptor_update_template_enabled},
11700    };
11701
11702    for (size_t i = 0; i < ARRAY_SIZE(device_extension_commands); i++) {
11703        if (!strcmp(device_extension_commands[i].name, name) && device_extension_commands[i].enabled)
11704            return device_extension_commands[i].proc;
11705    }
11706
11707    return nullptr;
11708}
11709
11710static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev) {
11711    static const struct {
11712        const char *name;
11713        PFN_vkVoidFunction proc;
11714    } khr_swapchain_commands[] = {
11715        {"vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR)},
11716        {"vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR)},
11717        {"vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR)},
11718        {"vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR)},
11719        {"vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR)},
11720    };
11721    layer_data *dev_data = nullptr;
11722
11723    if (dev) {
11724        dev_data = GetLayerDataPtr(get_dispatch_key(dev), layer_data_map);
11725        if (!dev_data->device_extensions.khr_swapchain_enabled) return nullptr;
11726    }
11727
11728    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
11729        if (!strcmp(khr_swapchain_commands[i].name, name)) return khr_swapchain_commands[i].proc;
11730    }
11731
11732    if (dev_data) {
11733        if (!dev_data->device_extensions.khr_display_swapchain_enabled) return nullptr;
11734    }
11735
11736    if (!strcmp("vkCreateSharedSwapchainsKHR", name)) return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);
11737
11738    return nullptr;
11739}
11740
11741static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance) {
11742    static const struct {
11743        const char *name;
11744        PFN_vkVoidFunction proc;
11745        bool instance_layer_data::*enable;
11746    } khr_surface_commands[] = {
11747#ifdef VK_USE_PLATFORM_ANDROID_KHR
11748        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
11749         &instance_layer_data::androidSurfaceExtensionEnabled},
11750#endif  // VK_USE_PLATFORM_ANDROID_KHR
11751#ifdef VK_USE_PLATFORM_MIR_KHR
11752        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
11753         &instance_layer_data::mirSurfaceExtensionEnabled},
11754#endif  // VK_USE_PLATFORM_MIR_KHR
11755#ifdef VK_USE_PLATFORM_WAYLAND_KHR
11756        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
11757         &instance_layer_data::waylandSurfaceExtensionEnabled},
11758#endif  // VK_USE_PLATFORM_WAYLAND_KHR
11759#ifdef VK_USE_PLATFORM_WIN32_KHR
11760        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
11761         &instance_layer_data::win32SurfaceExtensionEnabled},
11762#endif  // VK_USE_PLATFORM_WIN32_KHR
11763#ifdef VK_USE_PLATFORM_XCB_KHR
11764        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
11765         &instance_layer_data::xcbSurfaceExtensionEnabled},
11766#endif  // VK_USE_PLATFORM_XCB_KHR
11767#ifdef VK_USE_PLATFORM_XLIB_KHR
11768        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
11769         &instance_layer_data::xlibSurfaceExtensionEnabled},
11770#endif  // VK_USE_PLATFORM_XLIB_KHR
11771        {"vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
11772         &instance_layer_data::displayExtensionEnabled},
11773        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
11774         &instance_layer_data::surfaceExtensionEnabled},
11775        {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
11776         &instance_layer_data::surfaceExtensionEnabled},
11777        {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
11778         &instance_layer_data::surfaceExtensionEnabled},
11779        {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
11780         &instance_layer_data::surfaceExtensionEnabled},
11781        {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
11782         &instance_layer_data::surfaceExtensionEnabled},
11783    };
11784
11785    instance_layer_data *instance_data = nullptr;
11786    if (instance) {
11787        instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11788    }
11789
11790    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
11791        if (!strcmp(khr_surface_commands[i].name, name)) {
11792            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable))) return nullptr;
11793            return khr_surface_commands[i].proc;
11794        }
11795    }
11796
11797    return nullptr;
11798}
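
// The table above gates each WSI entry point on a bool data member of instance_layer_data through a
// pointer-to-member. A minimal standalone sketch of the idiom, with hypothetical names:
//
//     struct Flags { bool wsi_enabled; };
//     bool Flags::*flag = &Flags::wsi_enabled;  // pointer to a data member, not to an object
//     Flags f{true};
//     bool enabled = f.*flag;                   // dereference with .* (or ->* through an object pointer)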
11799
11800static PFN_vkVoidFunction intercept_extension_instance_commands(const char *name, VkInstance instance) {
11801    static const struct {
11802        const char *name;
11803        PFN_vkVoidFunction proc;
11805    } instance_extension_commands[] = {
11806        {"vkGetPhysicalDeviceQueueFamilyProperties2KHR",
11807         reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties2KHR)},
11808        {"vkEnumeratePhysicalDeviceGroupsKHX",
11809         reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDeviceGroupsKHX)},
11810    };
11811
11812    for (size_t i = 0; i < ARRAY_SIZE(instance_extension_commands); i++) {
11813        if (!strcmp(instance_extension_commands[i].name, name)) {
11814            return instance_extension_commands[i].proc;
11815        }
11816    }
11817    return nullptr;
11818}
11819
11820}  // namespace core_validation
11821
11822// vk_layer_logging.h expects these to be defined
11823
11824VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(VkInstance instance,
11825                                                              const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11826                                                              const VkAllocationCallbacks *pAllocator,
11827                                                              VkDebugReportCallbackEXT *pMsgCallback) {
11828    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11829}
11830
11831VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
11832                                                           const VkAllocationCallbacks *pAllocator) {
11833    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11834}
11835
11836VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
11837                                                   VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
11838                                                   int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11839    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11840}
11841
11842// loader-layer interface v0, just wrappers since there is only a layer
11843
11844VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
11845                                                                                      VkExtensionProperties *pProperties) {
11846    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
11847}
11848
11849VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
11850                                                                                  VkLayerProperties *pProperties) {
11851    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
11852}
11853
11854VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11855                                                                                VkLayerProperties *pProperties) {
11856    // The loader only calls this export with VK_NULL_HANDLE; the layer command handles that value internally.
11857    assert(physicalDevice == VK_NULL_HANDLE);
11858    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
11859}
11860
11861VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
11862                                                                                    const char *pLayerName, uint32_t *pCount,
11863                                                                                    VkExtensionProperties *pProperties) {
11864    // The loader only calls this export with VK_NULL_HANDLE; the layer command handles that value internally.
11865    assert(physicalDevice == VK_NULL_HANDLE);
11866    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
11867}
11868
11869VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
11870    return core_validation::GetDeviceProcAddr(dev, funcName);
11871}
11872
11873VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
11874    return core_validation::GetInstanceProcAddr(instance, funcName);
11875}
11876
11877VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
11878                                                                                           const char *funcName) {
11879    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
11880}
11881
11882VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
11883    assert(pVersionStruct != NULL);
11884    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
11885
11886    // Fill in the function pointers if the negotiated interface version is new enough for the structure to contain them.
11887    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
11888        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
11889        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
11890        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
11891    }
11892
11893    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
11894        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
11895    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
11896        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
11897    }
11898
11899    return VK_SUCCESS;
11900}
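
// For reference, a hedged sketch of how a loader might drive the negotiation above (hypothetical caller):
//
//     VkNegotiateLayerInterface negotiate = {};
//     negotiate.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
//     negotiate.loaderLayerInterfaceVersion = 2;  // the loader's highest supported version
//     if (vkNegotiateLoaderLayerInterfaceVersion(&negotiate) == VK_SUCCESS) {
//         // negotiate.loaderLayerInterfaceVersion is now clamped to the layer's supported version, and for
//         // versions >= 2 the pfnGetInstanceProcAddr/pfnGetDeviceProcAddr entry points have been filled in.
//     }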
11901