core_validation.cpp revision d81f107590290d41d3e39fcbf3f077658be0f5a6
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    do {                     \
        printf(__VA_ARGS__); \
        printf("\n");        \
    } while (0)
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    bool nv_glsl_shader_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
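
// Example (illustrative only, not compiled): the check above expects this layer to be
// activated before VK_LAYER_GOOGLE_unique_objects in the enabled-layer list:
//
//     const char *layers[] = {
//         "VK_LAYER_LUNARG_core_validation",  // must precede unique_objects
//         "VK_LAYER_GOOGLE_unique_objects",
//     };
//     VkInstanceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     ci.enabledLayerCount = 2;
//     ci.ppEnabledLayerNames = layers;
//
// Reversing the two names triggers the LOGCONSOLE message above.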

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over SPIR-V instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIR-V module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) {  // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() {  // ++x;
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
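
// Example (illustrative only, not compiled): each SPIR-V instruction packs its word count
// into the high 16 bits of its first word and its opcode into the low 16 bits, which is
// what len() and opcode() above decode. A typical walk over a module looks like:
//
//     for (auto insn : *module) {  // range-based for via shader_module::begin()/end() below
//         if (insn.opcode() == spv::OpEntryPoint) {
//             // insn.word(1) is the execution model; insn.word(2) is the entry point's <id>
//         }
//     }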

struct shader_module {
    // The SPIR-V image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. This is useful because walking type
    // trees, constant expressions, etc. requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;
    bool has_valid_spirv;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index(),
          has_valid_spirv(true) {
        build_def_index(this);
    }

    shader_module() : has_valid_spirv(false) {}

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); }  // First insn, past the 5-word header
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }          // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
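
// Example (illustrative only, not compiled): resolving a type chain with get_def(). Given
//     %1 = OpTypeFloat 32
//     %2 = OpTypeVector %1 4
// get_def(2) returns an iterator at the OpTypeVector, and calling get_def() on its word(2)
// operand yields the OpTypeFloat component type -- the pattern used by the describe_type
// and types_match helpers below.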

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
            return getImageState(my_data, VkImage(handle));
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
            return getBufferState(my_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                            " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = validation_error_map[msgCode];
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}
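
// Example (illustrative only): with strict == true, a check such as
//     validate_usage_flags(dev_data, actual, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, ...)
// requires every desired bit to be set, while strict == false accepts any overlap, e.g.
// desired == (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT) passes
// when at least one of the two bits is present.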

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                             int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
            return "image";
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
            return "buffer";
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
            return "image view";
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
            return "buffer view";
        case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
            return "swapchain";
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
            return "descriptor set";
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
            return "framebuffer";
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
            return "event";
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
            return "query pool";
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
            return "descriptor pool";
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
            return "command pool";
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
            return "pipeline";
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
            return "sampler";
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
            return "renderpass";
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
            return "device memory";
        case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
            return "semaphore";
        default:
            return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, no binding is recorded here (binding to NULL memory is an error, reported elsewhere)
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                // TODO: VALIDATION_ERROR_00791 and VALIDATION_ERROR_00803
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}
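
// Example (illustrative only): because non-sparse memory bindings are immutable in Vulkan,
// the second call below makes SetMemBinding() report MEMTRACK_REBIND_OBJECT:
//
//     vkBindImageMemory(device, image, mem_a, 0);
//     vkBindImageMemory(device, image, mem_b, 0);  // rebind attempt -> validation error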

// For NULL mem case, clear any previous binding; else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns the skip_call value (no validation errors are flagged here yet)
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
        case CMD_BINDPIPELINE:
            return "CMD_BINDPIPELINE";
        case CMD_BINDPIPELINEDELTA:
            return "CMD_BINDPIPELINEDELTA";
        case CMD_SETVIEWPORTSTATE:
            return "CMD_SETVIEWPORTSTATE";
        case CMD_SETLINEWIDTHSTATE:
            return "CMD_SETLINEWIDTHSTATE";
        case CMD_SETDEPTHBIASSTATE:
            return "CMD_SETDEPTHBIASSTATE";
        case CMD_SETBLENDSTATE:
            return "CMD_SETBLENDSTATE";
        case CMD_SETDEPTHBOUNDSSTATE:
            return "CMD_SETDEPTHBOUNDSSTATE";
        case CMD_SETSTENCILREADMASKSTATE:
            return "CMD_SETSTENCILREADMASKSTATE";
        case CMD_SETSTENCILWRITEMASKSTATE:
            return "CMD_SETSTENCILWRITEMASKSTATE";
        case CMD_SETSTENCILREFERENCESTATE:
            return "CMD_SETSTENCILREFERENCESTATE";
        case CMD_BINDDESCRIPTORSETS:
            return "CMD_BINDDESCRIPTORSETS";
        case CMD_BINDINDEXBUFFER:
            return "CMD_BINDINDEXBUFFER";
        case CMD_BINDVERTEXBUFFER:
            return "CMD_BINDVERTEXBUFFER";
        case CMD_DRAW:
            return "CMD_DRAW";
        case CMD_DRAWINDEXED:
            return "CMD_DRAWINDEXED";
        case CMD_DRAWINDIRECT:
            return "CMD_DRAWINDIRECT";
        case CMD_DRAWINDEXEDINDIRECT:
            return "CMD_DRAWINDEXEDINDIRECT";
        case CMD_DISPATCH:
            return "CMD_DISPATCH";
        case CMD_DISPATCHINDIRECT:
            return "CMD_DISPATCHINDIRECT";
        case CMD_COPYBUFFER:
            return "CMD_COPYBUFFER";
        case CMD_COPYIMAGE:
            return "CMD_COPYIMAGE";
        case CMD_BLITIMAGE:
            return "CMD_BLITIMAGE";
        case CMD_COPYBUFFERTOIMAGE:
            return "CMD_COPYBUFFERTOIMAGE";
        case CMD_COPYIMAGETOBUFFER:
            return "CMD_COPYIMAGETOBUFFER";
        case CMD_CLONEIMAGEDATA:
            return "CMD_CLONEIMAGEDATA";
        case CMD_UPDATEBUFFER:
            return "CMD_UPDATEBUFFER";
        case CMD_FILLBUFFER:
            return "CMD_FILLBUFFER";
        case CMD_CLEARCOLORIMAGE:
            return "CMD_CLEARCOLORIMAGE";
        case CMD_CLEARATTACHMENTS:
            return "CMD_CLEARATTACHMENTS";
        case CMD_CLEARDEPTHSTENCILIMAGE:
            return "CMD_CLEARDEPTHSTENCILIMAGE";
        case CMD_RESOLVEIMAGE:
            return "CMD_RESOLVEIMAGE";
        case CMD_SETEVENT:
            return "CMD_SETEVENT";
        case CMD_RESETEVENT:
            return "CMD_RESETEVENT";
        case CMD_WAITEVENTS:
            return "CMD_WAITEVENTS";
        case CMD_PIPELINEBARRIER:
            return "CMD_PIPELINEBARRIER";
        case CMD_BEGINQUERY:
            return "CMD_BEGINQUERY";
        case CMD_ENDQUERY:
            return "CMD_ENDQUERY";
        case CMD_RESETQUERYPOOL:
            return "CMD_RESETQUERYPOOL";
        case CMD_COPYQUERYPOOLRESULTS:
            return "CMD_COPYQUERYPOOLRESULTS";
        case CMD_WRITETIMESTAMP:
            return "CMD_WRITETIMESTAMP";
        case CMD_INITATOMICCOUNTERS:
            return "CMD_INITATOMICCOUNTERS";
        case CMD_LOADATOMICCOUNTERS:
            return "CMD_LOADATOMICCOUNTERS";
        case CMD_SAVEATOMICCOUNTERS:
            return "CMD_SAVEATOMICCOUNTERS";
        case CMD_BEGINRENDERPASS:
            return "CMD_BEGINRENDERPASS";
        case CMD_ENDRENDERPASS:
            return "CMD_ENDRENDERPASS";
        default:
            return "UNKNOWN";
    }
}

// SPIR-V utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
            // Types
            case spv::OpTypeVoid:
            case spv::OpTypeBool:
            case spv::OpTypeInt:
            case spv::OpTypeFloat:
            case spv::OpTypeVector:
            case spv::OpTypeMatrix:
            case spv::OpTypeImage:
            case spv::OpTypeSampler:
            case spv::OpTypeSampledImage:
            case spv::OpTypeArray:
            case spv::OpTypeRuntimeArray:
            case spv::OpTypeStruct:
            case spv::OpTypeOpaque:
            case spv::OpTypePointer:
            case spv::OpTypeFunction:
            case spv::OpTypeEvent:
            case spv::OpTypeDeviceEvent:
            case spv::OpTypeReserveId:
            case spv::OpTypeQueue:
            case spv::OpTypePipe:
                module->def_index[insn.word(1)] = insn.offset();
                break;

            // Fixed constants
            case spv::OpConstantTrue:
            case spv::OpConstantFalse:
            case spv::OpConstant:
            case spv::OpConstantComposite:
            case spv::OpConstantSampler:
            case spv::OpConstantNull:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Specialization constants
            case spv::OpSpecConstantTrue:
            case spv::OpSpecConstantFalse:
            case spv::OpSpecConstant:
            case spv::OpSpecConstantComposite:
            case spv::OpSpecConstantOp:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Variables
            case spv::OpVariable:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Functions
            case spv::OpFunction:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            default:
                // We don't care about any other defs for now.
                break;
        }
    }
}
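
// Example (illustrative only): for a module containing
//     %2 = OpTypeInt 32 1
// build_def_index() records def_index[2] = the instruction's word offset, so that
// shader_module::get_def(2) can later jump straight to the OpTypeInt. Note the result-id
// position varies by opcode: word(1) for OpType* instructions, word(2) for constants,
// variables, and functions, as the switch above reflects.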

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
        case spv::StorageClassInput:
            return "input";
        case spv::StorageClassOutput:
            return "output";
        case spv::StorageClassUniformConstant:
            return "const uniform";
        case spv::StorageClassUniform:
            return "uniform";
        case spv::StorageClassWorkgroup:
            return "workgroup local";
        case spv::StorageClassCrossWorkgroup:
            return "workgroup global";
        case spv::StorageClassPrivate:
            return "private global";
        case spv::StorageClassFunction:
            return "function";
        case spv::StorageClassGeneric:
            return "generic";
        case spv::StorageClassAtomicCounter:
            return "atomic counter";
        case spv::StorageClassImage:
            return "image";
        case spv::StorageClassPushConstant:
            return "push constant";
        default:
            return "unknown";
    }
}

// Get the value of an integral constant
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        // TODO: Either ensure that the specialization transform is already performed on a module we're
        //       considering here, OR -- specialize on the fly now.
        return 1;
    }

    return value.word(3);
}
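
// Example (illustrative only): "%7 = OpConstant %uint 42" encodes the result type in
// word(1), the result <id> in word(2), and the literal value in word(3), which is why the
// helper above returns value.word(3). Wide (e.g. 64-bit) constants carry additional
// literal words that this helper does not handle.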

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypeBool:
            ss << "bool";
            break;
        case spv::OpTypeInt:
            ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
            break;
        case spv::OpTypeFloat:
            ss << "float" << insn.word(2);
            break;
        case spv::OpTypeVector:
            ss << "vec" << insn.word(3) << " of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeMatrix:
            ss << "mat" << insn.word(3) << " of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeArray:
            ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypePointer:
            ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
            describe_type_inner(ss, src, insn.word(3));
            break;
        case spv::OpTypeStruct: {
            ss << "struct of (";
            for (unsigned i = 2; i < insn.len(); i++) {
                describe_type_inner(ss, src, insn.word(i));
                if (i == insn.len() - 1) {
                    ss << ")";
                } else {
                    ss << ", ";
                }
            }
            break;
        }
        case spv::OpTypeSampler:
            ss << "sampler";
            break;
        case spv::OpTypeSampledImage:
            ss << "sampler+";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeImage:
            ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
            break;
        default:
            ss << "oddtype";
            break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
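
// Example (illustrative only): for "%v4float = OpTypeVector %float 4" the helpers above
// produce "vec4 of float32"; a uniform-block pointer might render as
// "ptr to uniform struct of (vec4 of float32, float32)".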

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) return false;
    return type.word(2) < 64;
}
1150
1151static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
1152                        bool b_arrayed, bool relaxed) {
1153    // Walk two type trees together, and complain about differences
1154    auto a_insn = a->get_def(a_type);
1155    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. Storage class is expected to differ.
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
        return false;
    }

    switch (a_insn.opcode()) {
        case spv::OpTypeBool:
            return true;
        case spv::OpTypeInt:
            // Match on width, signedness
            return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
        case spv::OpTypeFloat:
            // Match on width
            return a_insn.word(2) == b_insn.word(2);
        case spv::OpTypeVector:
            // Match on element type, count.
            if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false)) return false;
            if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
                return a_insn.word(3) >= b_insn.word(3);
            } else {
                return a_insn.word(3) == b_insn.word(3);
            }
        case spv::OpTypeMatrix:
            // Match on element type, count.
            return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
                   a_insn.word(3) == b_insn.word(3);
        case spv::OpTypeArray:
            // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
            // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
            return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
                   get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
        case spv::OpTypeStruct:
            // Match on all element types
            {
                if (a_insn.len() != b_insn.len()) {
                    return false;  // Structs cannot match if member counts differ
                }

                for (unsigned i = 2; i < a_insn.len(); i++) {
                    if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                        return false;
                    }
                }

                return true;
            }
        default:
            // Remaining types are OpenCL-isms, or may not appear in the interfaces we are interested in. Just claim no match.
            return false;
    }
}
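// Note on the relaxed rules above (illustrative): when `relaxed` is set -- as it is for cross-stage
// interface checks -- a producer may write a wider vector than the consumer reads, provided the
// element type is a matching narrow numeric type. E.g. a vertex shader `vec4` output can satisfy a
// fragment shader `vec2` (or scalar `float`) input; strict matching would require identical counts.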

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypePointer:
            // See through the ptr -- this is only ever at the top level for graphics shaders; we're never actually passing
            // pointers around.
            return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
        case spv::OpTypeArray:
            if (strip_array_level) {
                return get_locations_consumed_by_type(src, insn.word(2), false);
            } else {
                return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
            }
        case spv::OpTypeMatrix:
            // Num locations is the dimension * element size
            return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
        case spv::OpTypeVector: {
            auto scalar_type = src->get_def(insn.word(2));
            auto bit_width =
                (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ? scalar_type.word(2) : 32;

            // Locations are 128-bit wide; 3- and 4-component vectors of 64-bit types require two.
            return (bit_width * insn.word(3) + 127) / 128;
        }
        default:
            // Everything else is just 1.
            return 1;

            // TODO: extend to handle 64-bit scalar types, whose vectors may need multiple locations.
    }
}

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
        case VK_FORMAT_R64G64B64A64_SFLOAT:
        case VK_FORMAT_R64G64B64A64_SINT:
        case VK_FORMAT_R64G64B64A64_UINT:
        case VK_FORMAT_R64G64B64_SFLOAT:
        case VK_FORMAT_R64G64B64_SINT:
        case VK_FORMAT_R64G64B64_UINT:
            return 2;
        default:
            return 1;
    }
}
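// Worked example of the 128-bit rule above: a `dvec4` (OpTypeVector of 4 x 64-bit float) needs
// (64 * 4 + 127) / 128 = 2 locations, matching the two locations returned for
// VK_FORMAT_R64G64B64A64_SFLOAT; a plain `vec4` needs (32 * 4 + 127) / 128 = 1.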

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    bool is_relaxed_precision;
    // TODO: collect the name, too? Isn't required to be present.
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},  {"tessellation control shader", true, true}, {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false}, {"fragment shader", false, false},
};
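// The arrayed_input/arrayed_output flags capture stages whose interfaces carry an implicit extra
// array dimension over vertices. For example (GLSL, illustrative only): a geometry shader declares
// `in vec4 color[];` while the vertex shader feeding it declares `out vec4 color;` -- the input
// type has one more array level, which types_match() strips via its arrayed flags.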

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(shader_module const *src, std::map<location_t, interface_var> *out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    // Walk down the type_id presented, trying to determine whether it's actually an interface block.
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        // This isn't an interface block.
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;
    std::unordered_map<unsigned, unsigned> member_relaxed_precision;

    // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }

            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
                member_relaxed_precision[member_index] = 1;
            }
        }
    }

    // Second pass -- produce the output, from Location decorations
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    // TODO: member index in interface_var too?
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    v.is_relaxed_precision = is_relaxed_precision;
                    (*out)[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
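// Illustrative only (exact decoration placement depends on the front-end compiler): given a block like
//     out Block { layout(location = 2) vec4 a; layout(location = 3) mat4 b; } blk;
// each member carries its own OpMemberDecorate Location, so `a` is recorded at location 2 and `b`
// (a mat4 consumes 4 locations) at 3..6, one interface_var per location with is_block_member set.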

static std::map<location_t, interface_var> collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
                                                                         spv::StorageClass sinterface, bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;
    std::unordered_map<unsigned, unsigned> var_relaxed_precision;

    for (auto insn : *src) {
        // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
        // fits neither model.
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
                var_relaxed_precision[insn.word(1)] = 1;
            }
        }
    }

    // TODO: handle grouped decorations
    // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.

    // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
    // rest of the word -- so we only need to look at the last byte in the word to determine which word contains the terminator.
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

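    // Worked example of the scan above: for `OpEntryPoint Fragment %4 "main" %in %out`, the literal
    // "main" packs little-endian into word(3) ('m','a','i','n', leaving no room for the terminator),
    // so the null terminator spills into word(4). word(3)'s high byte is 'n' (nonzero) and word(4)'s
    // is zero, so the loop stops at 4 and the final ++word points at the first interface id (%in).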
    std::map<location_t, interface_var> out;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0);  // Unspecified is OK; defaults to 0
            bool is_patch = var_patch.find(id) != var_patch.end();
            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();

            // All variables and interface block members in the Input or Output storage classes must be decorated with either
            // a builtin or an explicit location.
            //
            // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
            // this path for the interface block case, as the individual members of the type are decorated, rather than
            // variable declarations.

            if (location != -1) {
                // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
                // one result for each.
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_relaxed_precision = is_relaxed_precision;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                // An interface block instance
                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }

    return out;
}

static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
    debug_report_data *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
    std::vector<std::pair<uint32_t, interface_var>> out;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
                auto attachment_index = insn.word(3);
                auto id = insn.word(1);

                if (accessible_ids.count(id)) {
                    auto def = src->get_def(id);
                    assert(def != src->end());

                    // Check the variable's own storage class (def.word(3)); insn.word(3) is the attachment index literal,
                    // not a storage class, so comparing it against StorageClassUniformConstant was a bug.
                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
                        for (unsigned int offset = 0; offset < num_locations; offset++) {
                            interface_var v = {};
                            v.id = id;
                            v.type_id = def.word(1);
                            v.offset = offset;
                            out.emplace_back(attachment_index + offset, v);
                        }
                    }
                }
            }
        }
    }

    return out;
}

static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
    debug_report_data *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
        // DecorationDescriptorSet and DecorationBinding.
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    std::vector<std::pair<descriptor_slot_t, interface_var>> out;

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            interface_var v = {};
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            out.emplace_back(std::make_pair(set, binding), v);
        }
    }

    return out;
}
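// Illustrative mapping: a GLSL declaration such as
//     layout(set = 1, binding = 2) uniform sampler2D tex;
// becomes an OpVariable in UniformConstant storage decorated DescriptorSet 1 and Binding 2, and is
// reported here as descriptor_slot_t{1, 2} -- the same key used when checking the pipeline layout.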

static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    bool pass = true;

    auto outputs =
        collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
    auto inputs =
        collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    // Maps are sorted by key (location); walk them together to find mismatches
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", "%s writes to output location %u.%u which is not consumed by %s",
                        producer_stage->name, a_first.first, a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "%s consumes input location %u.%u which is not written by %s",
                        consumer_stage->name, b_first.first, b_first.second, producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            // Subtleties of arrayed interfaces:
            // - if is_patch, then the member is not arrayed, even though the interface may be.
            // - if is_block_member, then the extra array level of an arrayed interface is not
            //   expressed in the member type -- it's expressed in the block type.
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, true)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second, describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage",
                            a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
                    pass = false;
                }
            }
            if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", a_first.first,
                            a_first.second, producer_stage->name, consumer_stage->name)) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT,  // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
        case VK_FORMAT_UNDEFINED:
            return FORMAT_TYPE_UNDEFINED;
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8B8_SINT:
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R16_SINT:
        case VK_FORMAT_R16G16_SINT:
        case VK_FORMAT_R16G16B16_SINT:
        case VK_FORMAT_R16G16B16A16_SINT:
        case VK_FORMAT_R32_SINT:
        case VK_FORMAT_R32G32_SINT:
        case VK_FORMAT_R32G32B32_SINT:
        case VK_FORMAT_R32G32B32A32_SINT:
        case VK_FORMAT_R64_SINT:
        case VK_FORMAT_R64G64_SINT:
        case VK_FORMAT_R64G64B64_SINT:
        case VK_FORMAT_R64G64B64A64_SINT:
        case VK_FORMAT_B8G8R8_SINT:
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_A8B8G8R8_SINT_PACK32:
        case VK_FORMAT_A2B10G10R10_SINT_PACK32:
        case VK_FORMAT_A2R10G10B10_SINT_PACK32:
            return FORMAT_TYPE_SINT;
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8B8_UINT:
        case VK_FORMAT_R8G8B8A8_UINT:
        case VK_FORMAT_R16_UINT:
        case VK_FORMAT_R16G16_UINT:
        case VK_FORMAT_R16G16B16_UINT:
        case VK_FORMAT_R16G16B16A16_UINT:
        case VK_FORMAT_R32_UINT:
        case VK_FORMAT_R32G32_UINT:
        case VK_FORMAT_R32G32B32_UINT:
        case VK_FORMAT_R32G32B32A32_UINT:
        case VK_FORMAT_R64_UINT:
        case VK_FORMAT_R64G64_UINT:
        case VK_FORMAT_R64G64B64_UINT:
        case VK_FORMAT_R64G64B64A64_UINT:
        case VK_FORMAT_B8G8R8_UINT:
        case VK_FORMAT_B8G8R8A8_UINT:
        case VK_FORMAT_A8B8G8R8_UINT_PACK32:
        case VK_FORMAT_A2B10G10R10_UINT_PACK32:
        case VK_FORMAT_A2R10G10B10_UINT_PACK32:
            return FORMAT_TYPE_UINT;
        default:
            return FORMAT_TYPE_FLOAT;
    }
}
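// Example classifications from the mapping above: VK_FORMAT_R8G8B8A8_UNORM and VK_FORMAT_R32_SFLOAT
// both fall through to FORMAT_TYPE_FLOAT (anything read as floating point in the shader), while
// VK_FORMAT_R32G32_SINT yields FORMAT_TYPE_SINT and VK_FORMAT_A2B10G10R10_UINT_PACK32 yields
// FORMAT_TYPE_UINT.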

// Characterizes a SPIR-V type appearing in an interface to an FF (fixed-function) stage, for comparison to a VkFormat's
// characterization above.
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypeInt:
            return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
        case spv::OpTypeFloat:
            return FORMAT_TYPE_FLOAT;
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeArray:
        case spv::OpTypeImage:
            // Look through to the element (or sampled) type.
            return get_fundamental_type(src, insn.word(2));
        case spv::OpTypePointer:
            return get_fundamental_type(src, insn.word(3));

        default:
            return FORMAT_TYPE_UNDEFINED;
    }
}

static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}
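// For example: VK_SHADER_STAGE_FRAGMENT_BIT is 0x10; u_ffs() reports its lowest set bit as position 5
// (1-based, ffs-style), so get_shader_stage_id() returns 4 -- a compact 0-based index for per-stage
// arrays.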

static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
    // be specified only once.
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            // TODO: VALIDATION_ERROR_02105 perhaps?
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_INCONSISTENT_VI, "SC", "Duplicate vertex input binding descriptions for binding %d",
                        desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    bool pass = true;

    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);

    // Build index by location
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
            for (auto j = 0u; j < num_locations; j++) {
                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
            }
        }
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();
    bool used = false;

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                                 "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
                pass = false;
            }
            used = false;
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "Vertex shader consumes input at location %d but it is not provided", b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            // Type checking
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first, describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            // OK!
            used = true;
            it_b++;
        }
    }

    return pass;
}

static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
                                                    uint32_t subpass_index) {
    std::map<uint32_t, VkFormat> color_attachments;
    auto subpass = rpci->pSubpasses[subpass_index];
    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
        uint32_t attachment = subpass.pColorAttachments[i].attachment;
        if (attachment == VK_ATTACHMENT_UNUSED) continue;
        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
            color_attachments[i] = rpci->pAttachments[attachment].format;
        }
    }

    bool pass = true;

    // TODO: dual source blend index (spv::DecIndex, zero if not provided)

    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);

    auto it_a = outputs.begin();
    auto it_b = color_attachments.begin();

    // Walk attachment list and outputs together

    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();

        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader", it_b->first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
            unsigned att_type = get_format_type(it_b->second);

            // Type checking
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
                            string_VkFormat(it_b->second), describe_type(fs, it_a->second.type_id).c_str())) {
                    pass = false;
                }
            }

            // OK!
            it_a++;
            it_b++;
        }
    }

    return pass;
}

// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
// important for identifying the set of shader resources actually used by an entrypoint, for example.
// Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
//  - NOT the shader input/output interfaces.
//
// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
// converting parts of this to be generated from the machine-readable spec instead.
static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
    std::unordered_set<uint32_t> ids;
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
            // that we may not care about.
            continue;
        }

        // Try to add to the output set
        if (!ids.insert(id).second) {
            continue;  // If we already saw this id, we don't want to walk it again.
        }

        switch (insn.opcode()) {
            case spv::OpFunction:
                // Scan whole body of the function, enlisting anything interesting
                while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                    switch (insn.opcode()) {
                        case spv::OpLoad:
                        case spv::OpAtomicLoad:
                        case spv::OpAtomicExchange:
                        case spv::OpAtomicCompareExchange:
                        case spv::OpAtomicCompareExchangeWeak:
                        case spv::OpAtomicIIncrement:
                        case spv::OpAtomicIDecrement:
                        case spv::OpAtomicIAdd:
                        case spv::OpAtomicISub:
                        case spv::OpAtomicSMin:
                        case spv::OpAtomicUMin:
                        case spv::OpAtomicSMax:
                        case spv::OpAtomicUMax:
                        case spv::OpAtomicAnd:
                        case spv::OpAtomicOr:
                        case spv::OpAtomicXor:
                            worklist.insert(insn.word(3));  // ptr
                            break;
                        case spv::OpStore:
                        case spv::OpAtomicStore:
                            worklist.insert(insn.word(1));  // ptr
                            break;
                        case spv::OpAccessChain:
                        case spv::OpInBoundsAccessChain:
                            worklist.insert(insn.word(3));  // base ptr
                            break;
                        case spv::OpSampledImage:
                        case spv::OpImageSampleImplicitLod:
                        case spv::OpImageSampleExplicitLod:
                        case spv::OpImageSampleDrefImplicitLod:
                        case spv::OpImageSampleDrefExplicitLod:
                        case spv::OpImageSampleProjImplicitLod:
                        case spv::OpImageSampleProjExplicitLod:
                        case spv::OpImageSampleProjDrefImplicitLod:
                        case spv::OpImageSampleProjDrefExplicitLod:
                        case spv::OpImageFetch:
                        case spv::OpImageGather:
                        case spv::OpImageDrefGather:
                        case spv::OpImageRead:
                        case spv::OpImage:
                        case spv::OpImageQueryFormat:
                        case spv::OpImageQueryOrder:
                        case spv::OpImageQuerySizeLod:
                        case spv::OpImageQuerySize:
                        case spv::OpImageQueryLod:
                        case spv::OpImageQueryLevels:
                        case spv::OpImageQuerySamples:
                        case spv::OpImageSparseSampleImplicitLod:
                        case spv::OpImageSparseSampleExplicitLod:
                        case spv::OpImageSparseSampleDrefImplicitLod:
                        case spv::OpImageSparseSampleDrefExplicitLod:
                        case spv::OpImageSparseSampleProjImplicitLod:
                        case spv::OpImageSparseSampleProjExplicitLod:
                        case spv::OpImageSparseSampleProjDrefImplicitLod:
                        case spv::OpImageSparseSampleProjDrefExplicitLod:
                        case spv::OpImageSparseFetch:
                        case spv::OpImageSparseGather:
                        case spv::OpImageSparseDrefGather:
                        case spv::OpImageTexelPointer:
                            worklist.insert(insn.word(3));  // Image or sampled image
                            break;
                        case spv::OpImageWrite:
                            worklist.insert(insn.word(1));  // Image -- different operand order to above
                            break;
                        case spv::OpFunctionCall:
                            for (uint32_t i = 3; i < insn.len(); i++) {
                                worklist.insert(insn.word(i));  // fn itself, and all args
                            }
                            break;

                        case spv::OpExtInst:
                            for (uint32_t i = 5; i < insn.len(); i++) {
                                worklist.insert(insn.word(i));  // Operands to ext inst
                            }
                            break;
                    }
                }
                break;
        }
    }

    return ids;
}
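// Sketch of the traversal above (illustrative; exact SPIR-V emitted depends on the compiler): starting
// from the entrypoint's function id, ids reachable through loads, stores, access chains, image ops,
// function calls, and extended instructions are accumulated. So for GLSL `color = texture(tex, uv);`,
// the OpLoad of the sampled image keeps `tex`'s variable id in the accessible set, while a
// declared-but-unreferenced descriptor never enters the worklist.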

static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    // Strip off ptrs etc
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but is a good first step.
    // TODO: arrays, matrices, weird sizes
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4;  // Bytes; TODO: calculate this based on the type

                bool found_range = false;
                for (auto const &range : *push_constant_ranges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                        SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}

static bool validate_push_constant_usage(debug_report_data *report_data,
                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
                                                                  src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}
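// Illustrative only: with a GLSL push constant block such as
//     layout(push_constant) uniform Push { vec4 tint; } pc;   // member `tint` at offset 0
// used from the fragment stage, the checks above require a VkPushConstantRange whose
// [offset, offset + size) covers bytes 0..3 (the code currently assumes a 4-byte member size)
// and whose stageFlags include VK_SHADER_STAGE_FRAGMENT_BIT.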

// For given pipelineLayout verify that the set_layout_node at slot.first
//  has the requested binding at slot.second and return ptr to that binding
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout,
                                                                  descriptor_slot_t slot) {
    if (!pipelineLayout) return nullptr;

    if (slot.first >= pipelineLayout->set_layouts.size()) return nullptr;

    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
}

// Check object status for selected flag state
static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
                       "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
    auto it = my_data->pipelineMap.find(pipeline);
    if (it == my_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
    auto it = my_data->renderPassMap.find(renderpass);
    if (it == my_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
    auto it = my_data->frameBufferMap.find(framebuffer);
    if (it == my_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == my_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
    if (it == my_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

VkPhysicalDeviceLimits GetPhysicalDeviceLimits(layer_data const *dev_data) {
    return dev_data->phys_dev_properties.properties.limits;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and samples counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
    } else {  // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For the given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            // Pass the input attachment counts here; the color counts previously passed bound the wrong arrays.
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}
2278
2279// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2280// pipelineLayout[layoutIndex]
2281static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *descriptor_set,
2282                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2283                                            string &errorMsg) {
2284    auto num_sets = pipeline_layout->set_layouts.size();
2285    if (layoutIndex >= num_sets) {
2286        stringstream errorStr;
2287        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2288                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2289                 << layoutIndex;
2290        errorMsg = errorStr.str();
2291        return false;
2292    }
2293    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2294    return descriptor_set->IsCompatible(layout_node, &errorMsg);
2295}
2296
2297// Validate that data for each specialization entry is fully contained within the buffer.
2298static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2299    bool pass = true;
2300
2301    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2302
2303    if (spec) {
2304        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2305            // TODO: This is a good place for VALIDATION_ERROR_00589.
2306            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2307                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2308                            VALIDATION_ERROR_00590, "SC",
2309                            "Specialization entry %u (for constant id %u) references memory outside provided "
2310                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2311                            " bytes provided). %s.",
2312                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2313                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
2314                            validation_error_map[VALIDATION_ERROR_00590])) {
2315                    pass = false;
2316                }
2317            }
2318        }
2319    }
2320
2321    return pass;
2322}
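
// Minimal sketch of the bounds rule enforced above, on hypothetical values: a map entry
// is in bounds only if offset + size stays within dataSize. For example, an entry of
// {offset = 8, size = 8} checked against dataSize = 12 is rejected, since it claims bytes 8..15.
static bool sketch_spec_entry_in_bounds(VkSpecializationMapEntry const &entry, size_t data_size) {
    return entry.offset + entry.size <= data_size;
}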
2323
2324static bool descriptor_type_match(shader_module const *module, uint32_t type_id, VkDescriptorType descriptor_type,
2325                                  unsigned &descriptor_count) {
2326    auto type = module->get_def(type_id);
2327
2328    descriptor_count = 1;
2329
2330    // Strip off any array or pointer types. Where we remove an array level, multiply the descriptor count by that dimension.
2331    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2332        if (type.opcode() == spv::OpTypeArray) {
2333            descriptor_count *= get_constant_value(module, type.word(3));
2334            type = module->get_def(type.word(2));
2335        } else {
2336            type = module->get_def(type.word(3));
2337        }
2338    }
2339
2340    switch (type.opcode()) {
2341        case spv::OpTypeStruct: {
2342            for (auto insn : *module) {
2343                if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2344                    if (insn.word(2) == spv::DecorationBlock) {
2345                        return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2346                               descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2347                    } else if (insn.word(2) == spv::DecorationBufferBlock) {
2348                        return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2349                               descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2350                    }
2351                }
2352            }
2353
2354            // Invalid
2355            return false;
2356        }
2357
2358        case spv::OpTypeSampler:
2359            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER || descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2360
2361        case spv::OpTypeSampledImage:
2362            if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2363                // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
2364                // buffer descriptor doesn't really provide one. Allow this slight mismatch.
2365                auto image_type = module->get_def(type.word(2));
2366                auto dim = image_type.word(3);
2367                auto sampled = image_type.word(7);
2368                return dim == spv::DimBuffer && sampled == 1;
2369            }
2370            return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2371
2372        case spv::OpTypeImage: {
2373            // Many descriptor types can back an image type; valid choices depend on the dimension and on whether the image
2374            // is used with a sampler. SPIR-V for Vulkan requires Sampled to be 1 or 2; deferring that choice to runtime (0) is not allowed.
2375            auto dim = type.word(3);
2376            auto sampled = type.word(7);
2377
2378            if (dim == spv::DimSubpassData) {
2379                return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2380            } else if (dim == spv::DimBuffer) {
2381                if (sampled == 1) {
2382                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2383                } else {
2384                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2385                }
2386            } else if (sampled == 1) {
2387                return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2388                       descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2389            } else {
2390                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2391            }
2392        }
2393
2394        // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
2395        default:
2396            return false;  // Mismatch
2397    }
2398}
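
// Sketch of the descriptor-count accumulation above: each OpTypeArray level that is
// stripped multiplies the required descriptor count, so a hypothetical binding such as
// sampler2D tex[4][3] needs 4 * 3 = 12 descriptors in the matching set layout binding.
static unsigned sketch_descriptor_count_for_arrays(uint32_t const *dims, uint32_t level_count) {
    unsigned descriptor_count = 1;
    for (uint32_t i = 0; i < level_count; ++i) descriptor_count *= dims[i];  // one multiply per array level
    return descriptor_count;
}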
2399
2400static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2401    if (!feature) {
2402        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2403                    SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2404                    "Shader requires VkPhysicalDeviceFeatures::%s but the feature is not "
2405                    "enabled on the device",
2406                    feature_name)) {
2407            return false;
2408        }
2409    }
2410
2411    return true;
2412}
2413
2414static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2415                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2416    bool pass = true;
2417
2418    for (auto insn : *src) {
2419        if (insn.opcode() == spv::OpCapability) {
2420            switch (insn.word(1)) {
2421                case spv::CapabilityMatrix:
2422                case spv::CapabilityShader:
2423                case spv::CapabilityInputAttachment:
2424                case spv::CapabilitySampled1D:
2425                case spv::CapabilityImage1D:
2426                case spv::CapabilitySampledBuffer:
2427                case spv::CapabilityImageBuffer:
2428                case spv::CapabilityImageQuery:
2429                case spv::CapabilityDerivativeControl:
2430                    // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2431                    break;
2432
2433                case spv::CapabilityGeometry:
2434                    pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2435                    break;
2436
2437                case spv::CapabilityTessellation:
2438                    pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2439                    break;
2440
2441                case spv::CapabilityFloat64:
2442                    pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2443                    break;
2444
2445                case spv::CapabilityInt64:
2446                    pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2447                    break;
2448
2449                case spv::CapabilityTessellationPointSize:
2450                case spv::CapabilityGeometryPointSize:
2451                    pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2452                                            "shaderTessellationAndGeometryPointSize");
2453                    break;
2454
2455                case spv::CapabilityImageGatherExtended:
2456                    pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2457                    break;
2458
2459                case spv::CapabilityStorageImageMultisample:
2460                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample,
2461                                            "shaderStorageImageMultisample");
2462                    break;
2463
2464                case spv::CapabilityUniformBufferArrayDynamicIndexing:
2465                    pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2466                                            "shaderUniformBufferArrayDynamicIndexing");
2467                    break;
2468
2469                case spv::CapabilitySampledImageArrayDynamicIndexing:
2470                    pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2471                                            "shaderSampledImageArrayDynamicIndexing");
2472                    break;
2473
2474                case spv::CapabilityStorageBufferArrayDynamicIndexing:
2475                    pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2476                                            "shaderStorageBufferArrayDynamicIndexing");
2477                    break;
2478
2479                case spv::CapabilityStorageImageArrayDynamicIndexing:
2480                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2481                                            "shaderStorageImageArrayDynamicIndexing");
2482                    break;
2483
2484                case spv::CapabilityClipDistance:
2485                    pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2486                    break;
2487
2488                case spv::CapabilityCullDistance:
2489                    pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2490                    break;
2491
2492                case spv::CapabilityImageCubeArray:
2493                    pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2494                    break;
2495
2496                case spv::CapabilitySampleRateShading:
2497                    pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2498                    break;
2499
2500                case spv::CapabilitySparseResidency:
2501                    pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2502                    break;
2503
2504                case spv::CapabilityMinLod:
2505                    pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2506                    break;
2507
2508                case spv::CapabilitySampledCubeArray:
2509                    pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2510                    break;
2511
2512                case spv::CapabilityImageMSArray:
2513                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample,
2514                                            "shaderStorageImageMultisample");
2515                    break;
2516
2517                case spv::CapabilityStorageImageExtendedFormats:
2518                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2519                                            "shaderStorageImageExtendedFormats");
2520                    break;
2521
2522                case spv::CapabilityInterpolationFunction:
2523                    pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2524                    break;
2525
2526                case spv::CapabilityStorageImageReadWithoutFormat:
2527                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2528                                            "shaderStorageImageReadWithoutFormat");
2529                    break;
2530
2531                case spv::CapabilityStorageImageWriteWithoutFormat:
2532                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2533                                            "shaderStorageImageWriteWithoutFormat");
2534                    break;
2535
2536                case spv::CapabilityMultiViewport:
2537                    pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2538                    break;
2539
2540                default:
2541                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2542                                SHADER_CHECKER_BAD_CAPABILITY, "SC", "Shader declares capability %u, which is not supported in Vulkan.",
2543                                insn.word(1)))
2544                        pass = false;
2545                    break;
2546            }
2547        }
2548    }
2549
2550    return pass;
2551}
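
// Usage note, illustrative only and not called by the layer: callers fold the result of
// require_feature() into an accumulator with &= instead of returning early, so a single
// missing feature does not hide diagnostics for the ones checked after it.
static bool sketch_require_two_features(debug_report_data *report_data, VkPhysicalDeviceFeatures const *features) {
    bool pass = true;
    pass &= require_feature(report_data, features->geometryShader, "geometryShader");
    pass &= require_feature(report_data, features->shaderFloat64, "shaderFloat64");
    return pass;
}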
2552
2553static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2554    auto type = module->get_def(type_id);
2555
2556    while (true) {
2557        switch (type.opcode()) {
2558            case spv::OpTypeArray:
2559            case spv::OpTypeSampledImage:
2560                type = module->get_def(type.word(2));
2561                break;
2562            case spv::OpTypePointer:
2563                type = module->get_def(type.word(3));
2564                break;
2565            case spv::OpTypeImage: {
2566                auto dim = type.word(3);
2567                auto arrayed = type.word(5);
2568                auto msaa = type.word(6);
2569
2570                switch (dim) {
2571                    case spv::Dim1D:
2572                        return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2573                    case spv::Dim2D:
2574                        return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2575                               (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2576                    case spv::Dim3D:
2577                        return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2578                    case spv::DimCube:
2579                        return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2580                    case spv::DimSubpassData:
2581                        return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2582                    default:  // buffer, etc.
2583                        return 0;
2584                }
2585            }
2586            default:
2587                return 0;
2588        }
2589    }
2590}
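
// Worked example of the requirement bits composed above (sketch only): a multisampled,
// arrayed Dim2D image yields DESCRIPTOR_REQ_MULTI_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY,
// which draw-time validation later matches against the VkImageView actually bound.
static uint32_t sketch_reqs_for_ms_2d_array_image() {
    return DESCRIPTOR_REQ_MULTI_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY;
}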
2591
2592static bool validate_pipeline_shader_stage(
2593    debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage, PIPELINE_STATE *pipeline,
2594    shader_module **out_module, spirv_inst_iter *out_entrypoint, VkPhysicalDeviceFeatures const *enabledFeatures,
2595    std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2596    bool pass = true;
2597    auto module_it = shaderModuleMap.find(pStage->module);
2598    auto module = *out_module = module_it->second.get();  // note: assumes the module handle is present in the map; no end() check is made
2599
2600    if (!module->has_valid_spirv) return pass;
2601
2602    // Find the entrypoint
2603    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2604    if (entrypoint == module->end()) {
2605        log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, VALIDATION_ERROR_00510,
2606                "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
2607                string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510]);
2608        // Return unconditionally: with no entrypoint there is no point continuing, any analysis would just be
2609        // garbage, and the end() iterator must not be used below.
2610        return false;
2611
2612    // Validate shader capabilities against enabled device features
2613    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2614
2615    // Mark accessible ids
2616    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2617
2618    // Validate descriptor set layout against what the entrypoint actually uses
2619    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2620
2621    auto pipelineLayout = pipeline->pipeline_layout;
2622
2623    pass &= validate_specialization_offsets(report_data, pStage);
2624    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2625
2626    // Validate descriptor use
2627    for (auto use : descriptor_uses) {
2628        // While validating shaders capture which slots are used by the pipeline
2629        auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
2630        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2631
2632        // Verify given pipelineLayout has requested setLayout with requested binding
2633        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2634        unsigned required_descriptor_count;
2635
2636        if (!binding) {
2637            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2638                        SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2639                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2640                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2641                pass = false;
2642            }
2643        } else if (~binding->stageFlags & pStage->stage) {
2644            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2645                        SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2646                        "Shader uses descriptor slot %u.%u (used "
2647                        "as type `%s`) but descriptor not "
2648                        "accessible from stage %s",
2649                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2650                        string_VkShaderStageFlagBits(pStage->stage))) {
2651                pass = false;
2652            }
2653        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType, required_descriptor_count)) {
2654            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2655                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2656                        "Type mismatch on descriptor slot "
2657                        "%u.%u (used as type `%s`) but "
2658                        "descriptor of type %s",
2659                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2660                        string_VkDescriptorType(binding->descriptorType))) {
2661                pass = false;
2662            }
2663        } else if (binding->descriptorCount < required_descriptor_count) {
2664            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2665                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2666                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2667                        required_descriptor_count, use.first.first, use.first.second,
2668                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2669                pass = false;
2670            }
2671        }
2672    }
2673
2674    // Validate use of input attachments against subpass structure
2675    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2676        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2677
2678        auto rpci = pipeline->render_pass_ci.ptr();
2679        auto subpass = pipeline->graphicsPipelineCI.subpass;
2680
2681        for (auto use : input_attachment_uses) {
2682            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2683            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount)
2684                             ? input_attachments[use.first].attachment
2685                             : VK_ATTACHMENT_UNUSED;
2686
2687            if (index == VK_ATTACHMENT_UNUSED) {
2688                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2689                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2690                            "Shader consumes input attachment index %d but not provided in subpass", use.first)) {
2691                    pass = false;
2692                }
2693            } else if (get_format_type(rpci->pAttachments[index].format) != get_fundamental_type(module, use.second.type_id)) {
2694                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2695                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2696                            "Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first,
2697                            string_VkFormat(rpci->pAttachments[index].format), describe_type(module, use.second.type_id).c_str())) {
2698                    pass = false;
2699                }
2700            }
2701        }
2702    }
2703
2704    return pass;
2705}
2706
2707// Validate the shaders used by the given pipeline and store the descriptor slots
2708//  that are actually used by the pipeline into pPipeline->active_slots
2709static bool validate_and_capture_pipeline_shader_state(
2710    debug_report_data *report_data, PIPELINE_STATE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2711    std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2712    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2713    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2714    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2715
2716    shader_module *shaders[5];
2717    memset(shaders, 0, sizeof(shaders));
2718    spirv_inst_iter entrypoints[5];
2719    memset(entrypoints, 0, sizeof(entrypoints));
2720    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2721    bool pass = true;
2722
2723    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2724        auto pStage = &pCreateInfo->pStages[i];
2725        auto stage_id = get_shader_stage_id(pStage->stage);
2726        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline, &shaders[stage_id], &entrypoints[stage_id],
2727                                               enabledFeatures, shaderModuleMap);
2728    }
2729
2730    // If the shader stages are invalid individually, cross-stage validation is pointless.
2731    if (!pass) return false;
2732
2733    vi = pCreateInfo->pVertexInputState;
2734
2735    if (vi) {
2736        pass &= validate_vi_consistency(report_data, vi);
2737    }
2738
2739    if (shaders[vertex_stage] && shaders[vertex_stage]->has_valid_spirv) {
2740        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2741    }
2742
2743    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2744    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2745
2746    while (!shaders[producer] && producer != fragment_stage) {
2747        producer++;
2748        consumer++;
2749    }
2750
2751    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2752        assert(shaders[producer]);
2753        if (shaders[consumer] && shaders[consumer]->has_valid_spirv && shaders[producer]->has_valid_spirv) {
2754            pass &= validate_interface_between_stages(report_data, shaders[producer], entrypoints[producer],
2755                                                      &shader_stage_attribs[producer], shaders[consumer], entrypoints[consumer],
2756                                                      &shader_stage_attribs[consumer]);
2757
2758            producer = consumer;
2759        }
2760    }
2761
2762    if (shaders[fragment_stage] && shaders[fragment_stage]->has_valid_spirv) {
2763        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2764                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2765    }
2766
2767    return pass;
2768}
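
// Minimal sketch of the producer/consumer walk above, assuming the usual VS=0 .. FS=4
// ordering from get_shader_stage_id(): consecutive present stages are paired so that
// each cross-stage interface is checked exactly once. Not called by the layer.
static void sketch_stage_pairing(bool const present[5]) {
    int producer = 0;
    while (producer < 4 && !present[producer]) producer++;  // find the first stage that exists
    for (int consumer = producer + 1; consumer < 5; consumer++) {
        if (present[consumer]) {
            LOGCONSOLE("would validate the interface between stages %d and %d", producer, consumer);
            producer = consumer;  // this consumer produces for the next present stage
        }
    }
}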
2769
2770static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2771                                      VkPhysicalDeviceFeatures const *enabledFeatures,
2772                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2773    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2774
2775    shader_module *module;
2776    spirv_inst_iter entrypoint;
2777
2778    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline, &module, &entrypoint, enabledFeatures,
2779                                          shaderModuleMap);
2780}
2781// Return Set node ptr for specified set or else NULL
2782cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2783    auto set_it = my_data->setMap.find(set);
2784    if (set_it == my_data->setMap.end()) {
2785        return NULL;
2786    }
2787    return set_it->second;
2788}
2789
2790// For given pipeline, return the number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2791static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
2792    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2793        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2794        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2795    }
2796    return VK_SAMPLE_COUNT_1_BIT;
2797}
2798
2799static void list_bits(std::ostream &s, uint32_t bits) {
2800    for (int i = 0; i < 32 && bits; i++) {
2801        if (bits & (1u << i)) {  // unsigned shift avoids undefined behavior when i == 31
2802            s << i;
2803            bits &= ~(1u << i);
2804            if (bits) {
2805                s << ",";
2806            }
2807        }
2808    }
2809}
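
// Usage example, illustrative only: list_bits() renders a bitmask as a comma-separated
// list of set bit positions, so bits = 0x2C (binary 101100) prints as "2,3,5".
static void sketch_print_missing_viewports() {
    std::stringstream ss;
    list_bits(ss, 0x2C);
    LOGCONSOLE("missing dynamic viewports: %s", ss.str().c_str());
}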
2810
2811// Validate draw-time state related to the PSO
2812static bool ValidatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
2813                                          PIPELINE_STATE const *pPipeline) {
2814    bool skip_call = false;
2815
2816    // Verify vertex binding
2817    if (!pPipeline->vertexBindingDescriptions.empty()) {
2818        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2819            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2820            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2821                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2822                skip_call |=
2823                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2824                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2825                            "The Pipeline State Object (0x%" PRIxLEAST64
2826                            ") expects vertex binding %u of this Command Buffer to "
2827                            "be set via vkCmdBindVertexBuffers, because the VkVertexInputBindingDescription struct "
2828                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
2829                            (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
2830            }
2831        }
2832    } else {
2833        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
2834            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2835                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2836                                 "Vertex buffers are bound to command buffer (0x%p"
2837                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2838                                 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
2839        }
2840    }
2841    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2842    // Skip check if rasterization is disabled or there is no viewport.
2843    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2844         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2845        pPipeline->graphicsPipelineCI.pViewportState) {
2846        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2847        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2848
2849        if (dynViewport) {
2850            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
2851            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
2852            if (missingViewportMask) {
2853                std::stringstream ss;
2854                ss << "Dynamic viewport(s) ";
2855                list_bits(ss, missingViewportMask);
2856                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
2857                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2858                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
2859            }
2860        }
2861
2862        if (dynScissor) {
2863            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
2864            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
2865            if (missingScissorMask) {
2866                std::stringstream ss;
2867                ss << "Dynamic scissor(s) ";
2868                list_bits(ss, missingScissorMask);
2869                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
2870                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2871                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
2872            }
2873        }
2874    }
2875
2876    // Verify that any MSAA request in PSO matches sample# in bound FB
2877    // Skip the check if rasterization is disabled.
2878    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2879        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2880        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2881        if (pCB->activeRenderPass) {
2882            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
2883            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2884            uint32_t i;
2885            unsigned subpass_num_samples = 0;
2886
2887            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2888                auto attachment = subpass_desc->pColorAttachments[i].attachment;
2889                if (attachment != VK_ATTACHMENT_UNUSED)
2890                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2891            }
2892
2893            if (subpass_desc->pDepthStencilAttachment &&
2894                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
2895                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
2896                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2897            }
2898
2899            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
2900                skip_call |=
2901                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2902                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2903                            "Num samples mismatch! At draw-time the Pipeline (0x%" PRIxLEAST64
2904                            ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
2905                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2906                            reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2907            }
2908        } else {
2909            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2910                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH,
2911                                 "DS", "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2912                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2913        }
2914    }
2915    // Verify that PSO creation renderPass is compatible with active renderPass
2916    if (pCB->activeRenderPass) {
2917        std::string err_string;
2918        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
2919            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
2920                                             err_string)) {
2921            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2922            skip_call |=
2923                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2924                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2925                        "At Draw time the active render pass (0x%" PRIxLEAST64
2926                        ") is incompatible w/ gfx pipeline "
2927                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2928                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
2929                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
2930                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2931        }
2932
2933        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
2934            skip_call |=
2935                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2936                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2937                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
2938                        pCB->activeSubpass);
2939        }
2940    }
2941    // TODO : Add more checks here
2942
2943    return skip_call;
2944}
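
// Sketch of the subpass sample accumulation above, with hypothetical attachment values:
// the sample counts of every used color attachment and of the depth/stencil attachment
// are ORed into one mask, so a subpass whose attachments are all VK_SAMPLE_COUNT_4_BIT
// yields 4, and a PSO created with rasterizationSamples = VK_SAMPLE_COUNT_1_BIT mismatches.
static unsigned sketch_subpass_sample_mask(VkSampleCountFlagBits color_samples, VkSampleCountFlagBits depth_samples) {
    unsigned subpass_num_samples = 0;
    subpass_num_samples |= static_cast<unsigned>(color_samples);
    subpass_num_samples |= static_cast<unsigned>(depth_samples);
    return subpass_num_samples;
}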
2945
2946// Validate overall state at the time of a draw call
2947static bool ValidateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
2948                              const VkPipelineBindPoint bind_point, const char *function,
2949                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2950    bool result = false;
2951    auto const &state = cb_node->lastBound[bind_point];
2952    PIPELINE_STATE *pPipe = state.pipeline_state;
2953    if (nullptr == pPipe) {
2954        result |= log_msg(
2955            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2956            DRAWSTATE_INVALID_PIPELINE, "DS",
2957            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2958        // Early return as any further checks below will be busted w/o a pipeline
2959        // Early return: any further checks below would dereference the null pipeline pointer
2960        return result;
2961    // First check flag states
2962    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
2963        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexed, msg_code);
2964
2965    // Now complete other state checks
2966    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
2967        string errorString;
2968        auto pipeline_layout = pPipe->pipeline_layout;
2969
2970        for (const auto &set_binding_pair : pPipe->active_slots) {
2971            uint32_t setIndex = set_binding_pair.first;
2972            // If valid set is not bound throw an error
2973            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2974                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2975                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2976                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
2977                                  (uint64_t)pPipe->pipeline, setIndex);
2978            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
2979                                                        errorString)) {
2980                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
2981                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2982                result |=
2983                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2984                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2985                            "VkDescriptorSet (0x%" PRIxLEAST64
2986                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2987                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
2988                            errorString.c_str());
2989            } else {  // Valid set is bound and layout compatible, validate that it's updated
2990                // Pull the set node
2991                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
2992                // Gather active bindings
2993                std::unordered_set<uint32_t> active_bindings;
2994                for (auto binding : set_binding_pair.second) {
2995                    active_bindings.insert(binding.first);
2996                }
2997                // Make sure set has been updated if it has no immutable samplers
2998                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2999                if (!descriptor_set->IsUpdated()) {
3000                    for (auto binding : active_bindings) {
3001                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
3002                            result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3003                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)descriptor_set->GetSet(),
3004                                              __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3005                                              "Descriptor Set 0x%" PRIxLEAST64
3006                                              " bound but was never updated. It is now being used to draw so "
3007                                              "this will result in undefined behavior.",
3008                                              (uint64_t)descriptor_set->GetSet());
3009                        }
3010                    }
3011                }
3012                // Validate the draw-time state for this descriptor set
3013                std::string err_str;
3014                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], &err_str)) {
3015                    auto set = descriptor_set->GetSet();
3016                    result |=
3017                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3018                                reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3019                                "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
3020                                reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
3021                }
3022            }
3023        }
3024    }
3025
3026    // Check general pipeline state that needs to be validated at drawtime
3027    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3028
3029    return result;
3030}
3031
3032static void UpdateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
3033    auto const &state = cb_state->lastBound[bind_point];
3034    PIPELINE_STATE *pPipe = state.pipeline_state;
3035    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3036        for (const auto &set_binding_pair : pPipe->active_slots) {
3037            uint32_t setIndex = set_binding_pair.first;
3038            // Pull the set node
3039            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
3040            // Bind this set and its active descriptor resources to the command buffer
3041            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
3042            // For given active slots record updated images & buffers
3043            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
3044        }
3045    }
3046    if (!pPipe->vertexBindingDescriptions.empty()) {
3047        cb_state->vertex_buffer_used = true;
3048    }
3049}
3050
3051// Validate HW line width capabilities prior to setting requested line width.
3052static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3053    bool skip_call = false;
3054
3055    // First check to see if the physical device supports wide lines.
3056    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
3057        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3058                             dsError, "DS",
3059                             "Attempt to set lineWidth to %f but physical device wideLines feature "
3060                             "not supported/enabled so lineWidth must be 1.0f!",
3061                             lineWidth);
3062    } else {
3063        // Otherwise, make sure the width falls in the valid range.
3064        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3065            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3066            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3067                                 __LINE__, dsError, "DS",
3068                                 "Attempt to set lineWidth to %f but physical device limits line width "
3069                                 "to between [%f, %f]!",
3070                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3071                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3072        }
3073    }
3074
3075    return skip_call;
3076}
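
// Minimal sketch of the two-tier rule above, with a hypothetical device range: without
// the wideLines feature only 1.0f is legal; with it, the width must fall within
// limits.lineWidthRange, so e.g. a range of [0.5, 10.0] rejects a request of 12.0f.
static bool sketch_line_width_ok(VkBool32 wide_lines, float const range[2], float line_width) {
    if (VK_FALSE == wide_lines) return 1.0f == line_width;
    return (range[0] <= line_width) && (line_width <= range[1]);
}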
3077
3078// Verify that create state for a pipeline is valid
3079static bool verifyPipelineCreateState(layer_data *my_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
3080    bool skip_call = false;
3081
3082    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
3083
3084    // If create derivative bit is set, check that we've specified a base
3085    // pipeline correctly, and that the base pipeline was created to allow
3086    // derivatives.
3087    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3088        PIPELINE_STATE *pBasePipeline = nullptr;
3089        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3090              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3091            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
3092            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3093                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3094                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3095        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3096            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3097                skip_call |=
3098                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3099                            VALIDATION_ERROR_00518, "DS",
3100                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
3101                            validation_error_map[VALIDATION_ERROR_00518]);
3102            } else {
3103                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3104            }
3105        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3106            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3107        }
3108
3109        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3110            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3111                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3112                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3113        }
3114    }
3115
3116    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3117        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3118        auto const render_pass_info = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass)->createInfo.ptr();
3119        const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pPipeline->graphicsPipelineCI.subpass];
3120        if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
3121            skip_call |= log_msg(
3122                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3123                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02109, "DS",
3124                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
3125                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
3126                reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
3127                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
3128                validation_error_map[VALIDATION_ERROR_02109]);
3129        }
3130        if (!my_data->enabled_features.independentBlend) {
3131            if (pPipeline->attachments.size() > 1) {
3132                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3133                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3134                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3135                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3136                    // only attachment state, so memcmp is best suited for the comparison
3137                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3138                               sizeof(pAttachments[0]))) {
3139                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3140                                             __LINE__, VALIDATION_ERROR_01532, "DS",
3141                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
3142                                             "enabled, all elements of pAttachments must be identical. %s",
3143                                             validation_error_map[VALIDATION_ERROR_01532]);
3144                        break;
3145                    }
3146                }
3147            }
3148        }
3149        if (!my_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3150            skip_call |=
3151                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3152                        VALIDATION_ERROR_01533, "DS",
3153                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
3154                        validation_error_map[VALIDATION_ERROR_01533]);
3155        }
3156    }
3157
3158    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3159    // produces nonsense errors that confuse users. Other layers should already
3160    // emit errors for renderpass being invalid.
3161    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
3162    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3163        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3164                             VALIDATION_ERROR_02122, "DS",
3165                             "Invalid Pipeline CreateInfo State: Subpass index %u "
3166                             "is out of range for this renderpass (0..%u). %s",
3167                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
3168                             validation_error_map[VALIDATION_ERROR_02122]);
3169    }
3170
3171    if (!GetDisables(my_data)->shader_validation &&
3172        !validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3173                                                    my_data->shaderModuleMap)) {
3174        skip_call = true;
3175    }
3176    // Each shader's stage must be unique
3177    if (pPipeline->duplicate_shaders) {
3178        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3179            if (pPipeline->duplicate_shaders & stage) {
3180                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3181                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3182                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3183                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3184            }
3185        }
3186    }
3187    // VS is required
3188    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3189        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3190                             VALIDATION_ERROR_00532, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
3191                             validation_error_map[VALIDATION_ERROR_00532]);
3192    }
3193    // Either both or neither TC/TE shaders should be defined
3194    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
3195        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3196        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3197                             VALIDATION_ERROR_00534, "DS",
3198                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
3199                             validation_error_map[VALIDATION_ERROR_00534]);
3200    }
3201    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
3202        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3203        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3204                             VALIDATION_ERROR_00535, "DS",
3205                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
3206                             validation_error_map[VALIDATION_ERROR_00535]);
3207    }
3208    // Compute shaders should be specified independent of Gfx shaders
3209    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
3210        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3211                             VALIDATION_ERROR_00533, "DS",
3212                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
3213                             validation_error_map[VALIDATION_ERROR_00533]);
3214    }
3215    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3216    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3217    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3218        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3219         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3220        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3221                             VALIDATION_ERROR_02099, "DS",
3222                             "Invalid Pipeline CreateInfo State: "
3223                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3224                             "topology for tessellation pipelines. %s",
3225                             validation_error_map[VALIDATION_ERROR_02099]);
3226    }
3227    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3228        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3229        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3230            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3231                                 VALIDATION_ERROR_02100, "DS",
3232                                 "Invalid Pipeline CreateInfo State: "
3233                                 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3234                                 "topology is only valid for tessellation pipelines. %s",
3235                                 validation_error_map[VALIDATION_ERROR_02100]);
3236        }
3237    }
3238
3239    if (pPipeline->graphicsPipelineCI.pTessellationState &&
3240        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
3241         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
3242          my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
3243        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3244                             VALIDATION_ERROR_01426, "DS",
3245                             "Invalid Pipeline CreateInfo State: "
3246                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3247                             "topology used with patchControlPoints value %u."
3248                             " patchControlPoints should be >0 and <=%u. %s",
3249                             pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
3250                             my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
3251                             validation_error_map[VALIDATION_ERROR_01426]);
3252    }
3253
3254    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3255    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3256        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3257            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
3258                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
3259                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3260        }
3261    }
3262
3263    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
3264    // valid structure
3265    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
3266        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3267        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
3268        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
3269            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3270            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
3271                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
3272                                     0, __LINE__, VALIDATION_ERROR_02115, "DS",
3273                                     "Invalid Pipeline CreateInfo State: "
3274                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
3275                                     "depth/stencil attachment. %s",
3276                                     validation_error_map[VALIDATION_ERROR_02115]);
3277            }
3278        }
3279    }
3280    return skip_call;
3281}
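// Illustrative sketch (not part of the layer) of pipeline state that satisfies the
// tessellation- and topology-related checks above; the patchControlPoints value of 3
// is an example and need only stay within (0, maxTessellationPatchSize].
#if 0
VkPipelineInputAssemblyStateCreateInfo ia = {};
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;  // required when TC/TE stages are present

VkPipelineTessellationStateCreateInfo ts = {};
ts.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
ts.patchControlPoints = 3;  // > 0 and <= limits.maxTessellationPatchSize

// pStages must then contain both a TESSELLATION_CONTROL and a TESSELLATION_EVALUATION
// module (or neither), and must not contain a COMPUTE stage.
#endif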
3282
3283// Free the Pipeline nodes
3284static void deletePipelines(layer_data *my_data) {
3285    if (my_data->pipelineMap.empty()) return;
3286    for (auto &pipe_map_pair : my_data->pipelineMap) {
3287        delete pipe_map_pair.second;
3288    }
3289    my_data->pipelineMap.clear();
3290}
3291
3292// The block of code below is specifically for managing/tracking descriptor sets (DSs)
3293
3294// Return Pool node ptr for specified pool or else NULL
3295DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
3296    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3297    if (pool_it == dev_data->descriptorPoolMap.end()) {
3298        return NULL;
3299    }
3300    return pool_it->second;
3301}
3302
3303// Return false if the update struct is of a valid type; otherwise flag an error and return the log callback's skip result
3304static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3305    switch (pUpdateStruct->sType) {
3306        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3307        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3308            return false;
3309        default:
3310            return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3311                           DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3312                           "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3313                           string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3314    }
3315}
3316
3317// Return the descriptor count for the given update struct
3318static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3319    switch (pUpdateStruct->sType) {
3320        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3321            return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3322        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3323            // TODO : Need to understand this case better and make sure code is correct
3324            return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3325        default:
3326            return 0;
3327    }
3328}
3329
3330// For given layout and update, return the first overall index of the layout that is updated
3331static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3332                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3333    return binding_start_index + arrayIndex;
3334}
3335// For given layout and update, return the last overall index of the layout that is updated
3336static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3337                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3338    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3339    return binding_start_index + arrayIndex + count - 1;
3340}
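// Worked example for the two helpers above: if a binding's descriptors begin at
// overall layout index 8, a VkWriteDescriptorSet with dstArrayElement = 2 and
// descriptorCount = 3 yields start index 8 + 2 = 10 and end index 8 + 2 + 3 - 1 = 12.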
3341// Verify that the descriptor type in the update struct matches what's expected by the layout
3342static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3343                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3344    // First get actual type of update
3345    bool skip_call = false;
3346    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3347    switch (pUpdateStruct->sType) {
3348        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3349            actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3350            break;
3351        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3352            // No need to validate
3353            return false;
3354            break;
3356            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3357                                 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3358                                 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3359                                 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3360    }
3361    if (!skip_call) {
3362        if (layout_type != actualType) {
3363            skip_call |= log_msg(
3364                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3365                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3366                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3367                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3368        }
3369    }
3370    return skip_call;
3371}
3372
3373// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3374// func_str is the name of the calling function
3375// Return false if no errors occur
3376// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3377static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
3378    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
3379    bool skip_call = false;
3380    auto set_node = dev_data->setMap.find(set);
3381    if (set_node == dev_data->setMap.end()) {
3382        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3383                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3384                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3385                             (uint64_t)(set));
3386    } else {
3387        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
3388        if (set_node->second->in_use.load()) {
3389            skip_call |=
3390                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3391                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
3392                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
3393                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
3394        }
3395    }
3396    return skip_call;
3397}
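// App-side sketch of the rule enforced above (assumed typical usage; handles are
// placeholders): a set still referenced by an in-flight command buffer must not be
// freed until the GPU is done with it.
#if 0
vkQueueWaitIdle(queue);                                  // ensure no in-flight use of the set
vkFreeDescriptorSets(device, pool, 1, &descriptor_set);  // now passes validateIdleDescriptorSet
#endif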
3398
3399// Remove set from setMap and delete the set
3400static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3401    dev_data->setMap.erase(descriptor_set->GetSet());
3402    delete descriptor_set;
3403}
3404// Free all DS Pools including their Sets & related sub-structs
3405// NOTE : Calls to this function should be wrapped in mutex
3406static void deletePools(layer_data *my_data) {
3407    if (my_data->descriptorPoolMap.empty()) return;
3408    for (auto &pool_pair : my_data->descriptorPoolMap) {
3409        // Remove this pool's sets from setMap and delete them
3410        for (auto ds : pool_pair.second->sets) {
3411            freeDescriptorSet(my_data, ds);
3412        }
3413        pool_pair.second->sets.clear();
        delete pool_pair.second;  // free the pool state itself; the map only holds pointers
3414    }
3415    my_data->descriptorPoolMap.clear();
3416}
3417
3418static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3419                                VkDescriptorPoolResetFlags flags) {
3420    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    if (!pPool) return;  // guard against an unknown pool handle
3421    // TODO: validate flags
3422    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3423    for (auto ds : pPool->sets) {
3424        freeDescriptorSet(my_data, ds);
3425    }
3426    pPool->sets.clear();
3427    // Reset available count for each type and available sets for this pool
3428    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3429        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3430    }
3431    pPool->availableSets = pPool->maxSets;
3432}
3433
3434// For given CB object, fetch associated CB Node from map
3435GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3436    auto it = my_data->commandBufferMap.find(cb);
3437    if (it == my_data->commandBufferMap.end()) {
3438        return NULL;
3439    }
3440    return it->second;
3441}
3442// Free all CB Nodes
3443// NOTE : Calls to this function should be wrapped in mutex
3444static void deleteCommandBuffers(layer_data *my_data) {
3445    if (my_data->commandBufferMap.empty()) {
3446        return;
3447    }
3448    for (auto &cb_pair : my_data->commandBufferMap) {
3449        delete cb_pair.second;
3450    }
3451    my_data->commandBufferMap.clear();
3452}
3453
3454static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3455    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3456                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3457                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3458}
3459
3460// If a renderpass is active, verify that the given command type is appropriate for current subpass state
3461bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3462    if (!pCB->activeRenderPass) return false;
3463    bool skip_call = false;
3464    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3465        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3466        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3467                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3468                             "Commands cannot be called in a subpass using secondary command buffers.");
3469    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3470        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3471                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3472                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3473    }
3474    return skip_call;
3475}
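// Illustrative primary-command-buffer sketch of the two subpass-contents modes
// checked above (handles are placeholders):
#if 0
vkCmdBeginRenderPass(primary_cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);  // OK in SECONDARY_COMMAND_BUFFERS mode
// vkCmdDraw(primary_cb, 3, 1, 0, 0);                // would be flagged in this mode
vkCmdEndRenderPass(primary_cb);
#endif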
3476
3477static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3478    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3479        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3480                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3481                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3482    return false;
3483}
3484
3485static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3486    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3487        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3488                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3489                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3490    return false;
3491}
3492
3493static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3494    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3495        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3496                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3497                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
3498    return false;
3499}
3500
3501// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
3502// there's an issue with the Cmd ordering
3503bool ValidateCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3504    bool skip_call = false;
3505    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3506    if (pPool) {
3507        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3508        switch (cmd) {
3509            case CMD_BINDPIPELINE:
3510            case CMD_BINDPIPELINEDELTA:
3511            case CMD_BINDDESCRIPTORSETS:
3512            case CMD_FILLBUFFER:
3513            case CMD_CLEARCOLORIMAGE:
3514            case CMD_SETEVENT:
3515            case CMD_RESETEVENT:
3516            case CMD_WAITEVENTS:
3517            case CMD_BEGINQUERY:
3518            case CMD_ENDQUERY:
3519            case CMD_RESETQUERYPOOL:
3520            case CMD_COPYQUERYPOOLRESULTS:
3521            case CMD_WRITETIMESTAMP:
3522                skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3523                break;
3524            case CMD_SETVIEWPORTSTATE:
3525            case CMD_SETSCISSORSTATE:
3526            case CMD_SETLINEWIDTHSTATE:
3527            case CMD_SETDEPTHBIASSTATE:
3528            case CMD_SETBLENDSTATE:
3529            case CMD_SETDEPTHBOUNDSSTATE:
3530            case CMD_SETSTENCILREADMASKSTATE:
3531            case CMD_SETSTENCILWRITEMASKSTATE:
3532            case CMD_SETSTENCILREFERENCESTATE:
3533            case CMD_BINDINDEXBUFFER:
3534            case CMD_BINDVERTEXBUFFER:
3535            case CMD_DRAW:
3536            case CMD_DRAWINDEXED:
3537            case CMD_DRAWINDIRECT:
3538            case CMD_DRAWINDEXEDINDIRECT:
3539            case CMD_BLITIMAGE:
3540            case CMD_CLEARATTACHMENTS:
3541            case CMD_CLEARDEPTHSTENCILIMAGE:
3542            case CMD_RESOLVEIMAGE:
3543            case CMD_BEGINRENDERPASS:
3544            case CMD_NEXTSUBPASS:
3545            case CMD_ENDRENDERPASS:
3546                skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3547                break;
3548            case CMD_DISPATCH:
3549            case CMD_DISPATCHINDIRECT:
3550                skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3551                break;
3552            case CMD_COPYBUFFER:
3553            case CMD_COPYIMAGE:
3554            case CMD_COPYBUFFERTOIMAGE:
3555            case CMD_COPYIMAGETOBUFFER:
3556            case CMD_CLONEIMAGEDATA:
3557            case CMD_UPDATEBUFFER:
3558            case CMD_PIPELINEBARRIER:
3559            case CMD_EXECUTECOMMANDS:
3560            case CMD_END:
3561                break;
3562            default:
3563                break;
3564        }
3565    }
3566    if (pCB->state != CB_RECORDING) {
3567        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3568    } else {
3569        skip_call |= ValidateCmdSubpassState(my_data, pCB, cmd);
3570    }
3571    return skip_call;
3572}
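// Minimal sketch of how the layer's command entry points use ValidateCmd; the
// vkCmdDraw hook shown is just an example of the pattern:
#if 0
GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
if (cb_state) {
    skip_call |= ValidateCmd(dev_data, cb_state, CMD_DRAW, "vkCmdDraw()");
    UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_DRAW);
}
#endif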
3573
3574void UpdateCmdBufferLastCmd(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
3575    if (cb_state->state == CB_RECORDING) {
3576        cb_state->last_cmd = cmd;
3577    }
3578}
3579// For a given object struct, return a BASE_NODE pointer to its wrapping state struct
3580BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
3581    BASE_NODE *base_ptr = nullptr;
3582    switch (object_struct.type) {
3583        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
3584            base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
3585            break;
3586        }
3587        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
3588            base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
3589            break;
3590        }
3591        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
3592            base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
3593            break;
3594        }
3595        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
3596            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
3597            break;
3598        }
3599        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
3600            base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
3601            break;
3602        }
3603        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
3604            base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
3605            break;
3606        }
3607        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
3608            base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
3609            break;
3610        }
3611        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
3612            base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
3613            break;
3614        }
3615        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
3616            base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
3617            break;
3618        }
3619        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
3620            base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
3621            break;
3622        }
3623        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
3624            base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
3625            break;
3626        }
3627        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
3628            base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
3629            break;
3630        }
3631        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
3632            base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
3633            break;
3634        }
3635        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
3636            base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
3637            break;
3638        }
3639        default:
3640            // TODO : Any other objects to be handled here?
3641            assert(0);
3642            break;
3643    }
3644    return base_ptr;
3645}
3646
3647// Tie the VK_OBJECT to the cmd buffer which includes:
3648//  Add object_binding to cmd buffer
3649//  Add cb_binding to object
3650static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
3651    cb_bindings->insert(cb_node);
3652    cb_node->object_bindings.insert(obj);
3653}
3654// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
3655static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
3656    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
3657    if (base_obj) base_obj->cb_bindings.erase(cb_node);
3658}
3659// Reset the command buffer state
3660//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3661static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3662    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3663    if (pCB) {
3664        pCB->in_use.store(0);
3665        pCB->last_cmd = CMD_NONE;
3666        // Reset CB state (note that createInfo is not cleared)
3667        pCB->commandBuffer = cb;
3668        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3669        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3670        pCB->numCmds = 0;
3671        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3672        pCB->state = CB_NEW;
3673        pCB->submitCount = 0;
3674        pCB->status = 0;
3675        pCB->viewportMask = 0;
3676        pCB->scissorMask = 0;
3677
3678        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3679            pCB->lastBound[i].reset();
3680        }
3681
3682        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3683        pCB->activeRenderPass = nullptr;
3684        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3685        pCB->activeSubpass = 0;
3686        pCB->broken_bindings.clear();
3687        pCB->waitedEvents.clear();
3688        pCB->events.clear();
3689        pCB->writeEventsBeforeWait.clear();
3690        pCB->waitedEventsBeforeQueryReset.clear();
3691        pCB->queryToStateMap.clear();
3692        pCB->activeQueries.clear();
3693        pCB->startedQueries.clear();
3694        pCB->imageSubresourceMap.clear();
3695        pCB->imageLayoutMap.clear();
3696        pCB->eventToStageMap.clear();
3697        pCB->drawData.clear();
3698        pCB->currentDrawData.buffers.clear();
3699        pCB->vertex_buffer_used = false;
3700        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3701        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3702        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3703            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3704        }
3705        pCB->secondaryCommandBuffers.clear();
3706        pCB->updateImages.clear();
3707        pCB->updateBuffers.clear();
3708        clear_cmd_buf_and_mem_references(dev_data, pCB);
3709        pCB->eventUpdates.clear();
3710        pCB->queryUpdates.clear();
3711
3712        // Remove object bindings
3713        for (auto obj : pCB->object_bindings) {
3714            removeCommandBufferBinding(dev_data, &obj, pCB);
3715        }
3716        pCB->object_bindings.clear();
3717        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3718        for (auto framebuffer : pCB->framebuffers) {
3719            auto fb_state = getFramebufferState(dev_data, framebuffer);
3720            if (fb_state) fb_state->cb_bindings.erase(pCB);
3721        }
3722        pCB->framebuffers.clear();
3723        pCB->activeFramebuffer = VK_NULL_HANDLE;
3724    }
3725}
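// Note on call sites (hedged; presumed from the reset semantics above): resetCB() is
// expected to run both for an explicit vkResetCommandBuffer() and for the implicit
// reset performed when vkBeginCommandBuffer() is called on a previously recorded
// command buffer.
#if 0
vkResetCommandBuffer(cb, 0);             // explicit reset
vkBeginCommandBuffer(cb, &begin_info);   // implicit reset of any prior recording
#endif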
3726
3727// Set PSO-related status bits for CB, including dynamic state set via PSO
3728static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
3729    // Account for any dynamic state not set via this PSO
3730    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3731        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
3732        pCB->status |= CBSTATUS_ALL_STATE_SET;
3733    } else {
3734        // First consider all state on
3735        // Then unset any state that's noted as dynamic in PSO
3736        // Finally OR that into CB statemask
3737        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
3738        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3739            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3740                case VK_DYNAMIC_STATE_LINE_WIDTH:
3741                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3742                    break;
3743                case VK_DYNAMIC_STATE_DEPTH_BIAS:
3744                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3745                    break;
3746                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3747                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3748                    break;
3749                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3750                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3751                    break;
3752                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3753                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3754                    break;
3755                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3756                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3757                    break;
3758                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3759                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3760                    break;
3761                default:
3762                    // TODO : Flag error here
3763                    break;
3764            }
3765        }
3766        pCB->status |= psoDynStateMask;
3767    }
3768}
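// Worked example for set_cb_pso_status(): a pipeline whose pDynamicStates lists only
// VK_DYNAMIC_STATE_LINE_WIDTH produces psoDynStateMask = CBSTATUS_ALL_STATE_SET with
// CBSTATUS_LINE_WIDTH_SET cleared, so binding it marks all tracked state as set
// except line width, which still requires a vkCmdSetLineWidth() call before drawing.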
3769
3770// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
3771// render pass.
3772bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3773    bool inside = false;
3774    if (pCB->activeRenderPass) {
3775        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3776                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
3777                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
3778                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
3779    }
3780    return inside;
3781}
3782
3783// Flags validation error if the associated call is made outside a render pass. The apiName
3784// routine should ONLY be called inside a render pass.
3785bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
3786                       UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3787    bool outside = false;
3788    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3789        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3790         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3791        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3792                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
3793                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
3794    }
3795    return outside;
3796}
3797
3798static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3799    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3800}
3801
3802static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
3803    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3804        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
3805            instance_data->surfaceExtensionEnabled = true;
3806        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
3807            instance_data->displayExtensionEnabled = true;
3808#ifdef VK_USE_PLATFORM_ANDROID_KHR
3809        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
3810            instance_data->androidSurfaceExtensionEnabled = true;
3811#endif
3812#ifdef VK_USE_PLATFORM_MIR_KHR
3813        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
3814            instance_data->mirSurfaceExtensionEnabled = true;
3815#endif
3816#ifdef VK_USE_PLATFORM_WAYLAND_KHR
3817        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
3818            instance_data->waylandSurfaceExtensionEnabled = true;
3819#endif
3820#ifdef VK_USE_PLATFORM_WIN32_KHR
3821        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
3822            instance_data->win32SurfaceExtensionEnabled = true;
3823#endif
3824#ifdef VK_USE_PLATFORM_XCB_KHR
3825        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
3826            instance_data->xcbSurfaceExtensionEnabled = true;
3827#endif
3828#ifdef VK_USE_PLATFORM_XLIB_KHR
3829        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
3830            instance_data->xlibSurfaceExtensionEnabled = true;
3831#endif
3832    }
3833}
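// App-side sketch of the instance creation this scan inspects (the XCB platform
// extension is an arbitrary example; the macros are standard Vulkan defines):
#if 0
const char *exts[] = {VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_XCB_SURFACE_EXTENSION_NAME};
VkInstanceCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
ici.enabledExtensionCount = 2;
ici.ppEnabledExtensionNames = exts;
#endif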
3834
3835VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
3836                                              VkInstance *pInstance) {
3837    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3838
3839    assert(chain_info->u.pLayerInfo);
3840    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3841    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3842    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
3843
3844    // Advance the link info for the next element on the chain
3845    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3846
3847    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3848    if (result != VK_SUCCESS) return result;
3849
3850    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
3851    instance_data->instance = *pInstance;
3852    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
3853    instance_data->report_data = debug_report_create_instance(
3854        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
3855    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
3856    init_core_validation(instance_data, pAllocator);
3857
3858    ValidateLayerOrdering(*pCreateInfo);
3859
3860    return result;
3861}
3862
3863// Hook DestroyInstance to remove tableInstanceMap entry
3864VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3865    // TODOSC : Shouldn't need any customization here
3866    dispatch_key key = get_dispatch_key(instance);
3867    // TBD: Need any locking this early, in case this function is called at the
3868    // same time by more than one thread?
3869    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
3870    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
3871
3872    std::lock_guard<std::mutex> lock(global_lock);
3873    // Clean up logging callback, if any
3874    while (instance_data->logging_callback.size() > 0) {
3875        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
3876        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
3877        instance_data->logging_callback.pop_back();
3878    }
3879
3880    layer_debug_report_destroy_instance(instance_data->report_data);
3881    instance_layer_data_map.erase(key);  // instance data lives in instance_layer_data_map, not layer_data_map
3882}
3883
3884static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3885    uint32_t i;
3886    // TBD: Need any locking, in case this function is called at the same time by more than one thread?
3887    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3888    dev_data->device_extensions.wsi_enabled = false;
3889    dev_data->device_extensions.wsi_display_swapchain_enabled = false;
3890    dev_data->device_extensions.nv_glsl_shader_enabled = false;
3891
3892    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3893        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
3894            dev_data->device_extensions.wsi_enabled = true;
3895        }
3896        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0) {
3897            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
3898        }
3899        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_NV_GLSL_SHADER_EXTENSION_NAME) == 0) {
3900            dev_data->device_extensions.nv_glsl_shader_enabled = true;
3901        }
3902    }
3903}
3904
3905// Verify that queue family has been properly requested
3906static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
3907                                                   const VkDeviceCreateInfo *create_info) {
3908    bool skip_call = false;
3909    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
3910    // First check is whether the app has actually queried queueFamilyProperties
3911    if (!physical_device_state) {
3912        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3913                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
3914                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
3915    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
3916        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
3917        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
3918                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
3919                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
3920    } else {
3921        // Check that the requested queue properties are valid
3922        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
3923            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
3924            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
3925                skip_call |= log_msg(
3926                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3927                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3928                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
3929            } else if (create_info->pQueueCreateInfos[i].queueCount >
3930                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
3931                skip_call |= log_msg(
3932                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3933                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3934                    "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
3935                    "requested queueCount is %u.",
3936                    requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
3937                    create_info->pQueueCreateInfos[i].queueCount);
3938            }
3939        }
3940    }
3941    return skip_call;
3942}
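// Sketch of the query-then-create flow this check expects from applications
// (standard Vulkan count/fill idiom; variable names are placeholders):
#if 0
uint32_t count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
std::vector<VkQueueFamilyProperties> props(count);
vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
// Each VkDeviceQueueCreateInfo must then use queueFamilyIndex < count and
// queueCount <= props[queueFamilyIndex].queueCount.
#endif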
3943
3944// Verify that features have been queried and that they are available
3945static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys,
3946                                      const VkPhysicalDeviceFeatures *requested_features) {
3947    bool skip_call = false;
3948
3949    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
3950    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
3951    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
3952    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
3953    //  Need to provide the struct member name with the issue. To do that seems like we'll
3954    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
3955    uint32_t errors = 0;
3956    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
3957    for (uint32_t i = 0; i < total_bools; i++) {
3958        if (requested[i] > actual[i]) {
3959            // TODO: Add index to struct member name helper to be able to include a feature name
3960            skip_call |=
3961                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3962                        __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
3963                        "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
3964                        "which is not available on this device.",
3965                        i);
3966            errors++;
3967        }
3968    }
3969    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
3970        // If user didn't request features, notify them that they should
3971        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
3972        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
3973                             0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
3974                             "You requested features that are unavailable on this device. You should first query feature "
3975                             "availability by calling vkGetPhysicalDeviceFeatures().");
3976    }
3977    return skip_call;
3978}
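// Sketch of the feature negotiation this check expects (robustBufferAccess is an
// arbitrary example feature):
#if 0
VkPhysicalDeviceFeatures supported = {};
vkGetPhysicalDeviceFeatures(gpu, &supported);
VkPhysicalDeviceFeatures enabled = {};
enabled.robustBufferAccess = supported.robustBufferAccess;  // request only what exists
device_create_info.pEnabledFeatures = &enabled;
#endif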
3979
3980VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3981                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3982    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
3983    bool skip_call = false;
3984
3985    // Check that any requested features are available
3986    if (pCreateInfo->pEnabledFeatures) {
3987        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
3988    }
3989    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
3990
3991    if (skip_call) {
3992        return VK_ERROR_VALIDATION_FAILED_EXT;
3993    }
3994
3995    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3996
3997    assert(chain_info->u.pLayerInfo);
3998    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3999    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4000    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4001    if (fpCreateDevice == NULL) {
4002        return VK_ERROR_INITIALIZATION_FAILED;
4003    }
4004
4005    // Advance the link info for the next element on the chain
4006    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4007
4008    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4009    if (result != VK_SUCCESS) {
4010        return result;
4011    }
4012
4013    std::unique_lock<std::mutex> lock(global_lock);
4014    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4015
4016    my_device_data->instance_data = my_instance_data;
4017    // Setup device dispatch table
4018    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4019    my_device_data->device = *pDevice;
4020    // Save PhysicalDevice handle
4021    my_device_data->physical_device = gpu;
4022
4023    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4024    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4025    // Get physical device limits for this device
4026    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4027    uint32_t count;
4028    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4029    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4030    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4031        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4032    // TODO: device limits should make sure these are compatible
4033    if (pCreateInfo->pEnabledFeatures) {
4034        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4035    } else {
4036        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4037    }
4038    // Store physical device properties and physical device mem limits into device layer_data structs
4039    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4040    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &my_device_data->phys_dev_props);
4041    lock.unlock();
4042
4043    ValidateLayerOrdering(*pCreateInfo);
4044
4045    return result;
4046}
4047
4049VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4050    // TODOSC : Shouldn't need any customization here
4051    bool skip = false;
4052    dispatch_key key = get_dispatch_key(device);
4053    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4054    // Free all the memory
4055    std::unique_lock<std::mutex> lock(global_lock);
4056    deletePipelines(dev_data);
4057    dev_data->renderPassMap.clear();
4058    deleteCommandBuffers(dev_data);
4059    // This will also delete all sets in the pool & remove them from setMap
4060    deletePools(dev_data);
4061    // All sets should be removed
4062    assert(dev_data->setMap.empty());
4063    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4064        delete del_layout.second;
4065    }
4066    dev_data->descriptorSetLayoutMap.clear();
4067    dev_data->imageViewMap.clear();
4068    dev_data->imageMap.clear();
4069    dev_data->imageSubresourceMap.clear();
4070    dev_data->imageLayoutMap.clear();
4071    dev_data->bufferViewMap.clear();
4072    dev_data->bufferMap.clear();
4073    // Queues persist until device is destroyed
4074    dev_data->queueMap.clear();
4075    // Report any memory leaks
4076    layer_debug_report_destroy_device(device);
4077    lock.unlock();
4078
4079#if DISPATCH_MAP_DEBUG
4080    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4081#endif
4082    if (!skip) {
4083        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4084        layer_data_map.erase(key);
4085    }
4086}
4087
4088static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4089
4090// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
4091//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
4092static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
4093                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
4094    bool skip = false;
4095    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
4096        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
4097                        geo_error_id, "DL",
4098                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
4099                        "device does not have geometryShader feature enabled. %s",
4100                        caller, validation_error_map[geo_error_id]);
4101    }
4102    if (!dev_data->enabled_features.tessellationShader &&
4103        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
4104        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
4105                        tess_error_id, "DL",
4106                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
4107                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
4108                        "does not have tessellationShader feature enabled. %s",
4109                        caller, validation_error_map[tess_error_id]);
4110    }
4111    return skip;
4112}
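// Example of the rule above: a stageMask such as
//   VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
// passed to a call such as vkCmdSetEvent() or vkCmdWaitEvents() is only legal if the
// device was created with the geometryShader feature enabled; likewise the two
// tessellation stage bits require the tessellationShader feature.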
4113
4114// Validate that the initial layout specified in the command buffer for the IMAGE
4115// is the same as the global IMAGE layout
4117static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4118    bool skip_call = false;
4119    for (auto cb_image_data : pCB->imageLayoutMap) {
4120        VkImageLayout imageLayout;
4121        if (!FindGlobalLayout(dev_data, cb_image_data.first, imageLayout)) {
4122            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4123                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4124                                 "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4125                                 reinterpret_cast<const uint64_t &>(cb_image_data.first));
4126        } else {
4127            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4128                // TODO: Set memory invalid which is in mem_tracker currently
4129            } else if (imageLayout != cb_image_data.second.initialLayout) {
4130                if (cb_image_data.first.hasSubresource) {
4131                    skip_call |= log_msg(
4132                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4133                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4134                        "Cannot submit cmd buffer using image (0x%" PRIx64
4135                        ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4136                        "with layout %s when first use is %s.",
4137                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4138                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
4139                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
4140                } else {
4141                    skip_call |=
4142                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4143                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(pCB->commandBuffer),
4144                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using image (0x%" PRIx64
4145                                                                                ") with layout %s when "
4146                                                                                "first use is %s.",
4147                                reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4148                                string_VkImageLayout(cb_image_data.second.initialLayout));
4149                }
4150            }
4151            SetGlobalLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4152        }
4153    }
4154    return skip_call;
4155}
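// Worked example of the mismatch reported above: if the globally tracked layout of
// an image is VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but the submitted command
// buffer recorded its first use assuming VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
// the submission is flagged; the fix is to transition the image (e.g. with a
// pipeline barrier) before submitting, or to re-record the command buffer.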
4156
4157// Loop through bound objects and increment their in_use counts
4158//  For any unknown objects, flag an error
4159static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4160    bool skip = false;
4161    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4162    BASE_NODE *base_obj = nullptr;
4163    for (auto obj : cb_node->object_bindings) {
4164        switch (obj.type) {
4165            case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4166                base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4167                error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4168                break;
4169            }
4170            case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4171                base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4172                error_code = DRAWSTATE_INVALID_SAMPLER;
4173                break;
4174            }
4175            case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4176                base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4177                error_code = DRAWSTATE_INVALID_QUERY_POOL;
4178                break;
4179            }
4180            case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4181                base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4182                error_code = DRAWSTATE_INVALID_PIPELINE;
4183                break;
4184            }
4185            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4186                base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4187                error_code = DRAWSTATE_INVALID_BUFFER;
4188                break;
4189            }
4190            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4191                base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4192                error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4193                break;
4194            }
4195            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4196                base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4197                error_code = DRAWSTATE_INVALID_IMAGE;
4198                break;
4199            }
4200            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4201                base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4202                error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4203                break;
4204            }
4205            case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4206                base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4207                error_code = DRAWSTATE_INVALID_EVENT;
4208                break;
4209            }
4210            case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4211                base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4212                error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4213                break;
4214            }
4215            case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4216                base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4217                error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4218                break;
4219            }
4220            case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4221                base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4222                error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4223                break;
4224            }
4225            case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4226                base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4227                error_code = DRAWSTATE_INVALID_RENDERPASS;
4228                break;
4229            }
4230            case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4231                base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4232                error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4233                break;
4234            }
4235            default:
4236                // TODO : Merge handling of other objects types into this code
4237                break;
4238        }
4239        if (!base_obj) {
4240            skip |=
4241                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4242                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4243        } else {
4244            base_obj->in_use.fetch_add(1);
4245        }
4246    }
4247    return skip;
4248}
4249
4250// Track which resources are in-flight by atomically incrementing their "in_use" count
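// The counts bumped here are released by DecrementBoundResources()/RetireWorkOnQueue() once the
// submission retires, so each fetch_add below is matched by exactly one fetch_sub there.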
4251static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4252    bool skip_call = false;
4253
4254    cb_node->in_use.fetch_add(1);
4255    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4256
4257    // First, increment in_use for all "generic" objects bound to the cmd buffer, followed by special-case objects below
4258    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4259    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4260    //  all the corresponding cases are verified to set CB_INVALID state, which would then be
4261    //  flagged prior to calling this function
4262    for (auto drawDataElement : cb_node->drawData) {
4263        for (auto buffer : drawDataElement.buffers) {
4264            auto buffer_state = getBufferState(dev_data, buffer);
4265            if (!buffer_state) {
4266                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4267                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4268                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4269            } else {
4270                buffer_state->in_use.fetch_add(1);
4271            }
4272        }
4273    }
4274    for (auto event : cb_node->writeEventsBeforeWait) {
4275        auto event_state = getEventNode(dev_data, event);
4276        if (event_state) event_state->write_in_use++;
4277    }
4278    return skip_call;
4279}
4280
4281// Note: This function assumes that the global lock is held by the calling thread.
4282// For the given queue, verify the queue state up to the given seq number.
4283 // Currently the only check is that if there are events to be waited on prior to
4284 //  a QueryReset, all such events have been signaled.
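// Because a submission may wait on semaphores signaled from other queues, the walk also records, per
// other queue, the highest wait.seq it sees, then recursively verifies those queues up to that point.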
4285static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
4286    bool skip = false;
4287    auto queue_seq = queue->seq;
4288    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
4289    auto sub_it = queue->submissions.begin();
4290    while (queue_seq < seq) {
4291        for (auto &wait : sub_it->waitSemaphores) {
4292            auto &last_seq = other_queue_seqs[wait.queue];
4293            last_seq = std::max(last_seq, wait.seq);
4294        }
4295        for (auto cb : sub_it->cbs) {
4296            auto cb_node = getCBNode(dev_data, cb);
4297            if (cb_node) {
4298                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
4299                    for (auto event : queryEventsPair.second) {
4300                        if (dev_data->eventMap[event].needsSignaled) {
4301                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4302                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4303                                            "Cannot get query results on queryPool 0x%" PRIx64
4304                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4305                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4306                        }
4307                    }
4308                }
4309            }
4310        }
4311        sub_it++;
4312        queue_seq++;
4313    }
4314    for (auto qs : other_queue_seqs) {
4315        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
4316    }
4317    return skip;
4318}
4319
4320// When the given fence is retired, verify outstanding queue operations through the point of the fence
4321static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
4322    auto fence_state = getFenceNode(dev_data, fence);
4323    if (VK_NULL_HANDLE != fence_state->signaler.first) {
4324        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
4325    }
4326    return false;
4327}
4328
4329// TODO: nuke this completely.
4330// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4331static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4332    // Decrement the CB's in-use count; once it hits zero the CB is no longer in flight on any queue, so drop it from the global set
4333    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4334    pCB->in_use.fetch_sub(1);
4335    if (!pCB->in_use.load()) {
4336        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4337    }
4338}
4339
4340// Decrement in-use count for objects bound to command buffer
4341static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4342    BASE_NODE *base_obj = nullptr;
4343    for (auto obj : cb_node->object_bindings) {
4344        base_obj = GetStateStructPtrFromObject(dev_data, obj);
4345        if (base_obj) {
4346            base_obj->in_use.fetch_sub(1);
4347        }
4348    }
4349}
4350
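// Roll the given queue's state forward to the given seq, retiring each completed submission along the
// way: release semaphore and command-buffer in-use counts, propagate per-CB query and event state into
// the device-level maps, mark the submission's fence (if any) retired, and finally roll other queues
// forward to the highest semaphore-wait seq observed.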
4351static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
4352    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4353
4354    // Roll this queue forward, one submission at a time.
4355    while (pQueue->seq < seq) {
4356        auto &submission = pQueue->submissions.front();
4357
4358        for (auto &wait : submission.waitSemaphores) {
4359            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4360            if (pSemaphore) {
4361                pSemaphore->in_use.fetch_sub(1);
4362            }
4363            auto &lastSeq = otherQueueSeqs[wait.queue];
4364            lastSeq = std::max(lastSeq, wait.seq);
4365        }
4366
4367        for (auto &semaphore : submission.signalSemaphores) {
4368            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4369            if (pSemaphore) {
4370                pSemaphore->in_use.fetch_sub(1);
4371            }
4372        }
4373
4374        for (auto cb : submission.cbs) {
4375            auto cb_node = getCBNode(dev_data, cb);
4376            if (!cb_node) {
4377                continue;
4378            }
4379            // First perform decrement on general case bound objects
4380            DecrementBoundResources(dev_data, cb_node);
4381            for (auto drawDataElement : cb_node->drawData) {
4382                for (auto buffer : drawDataElement.buffers) {
4383                    auto buffer_state = getBufferState(dev_data, buffer);
4384                    if (buffer_state) {
4385                        buffer_state->in_use.fetch_sub(1);
4386                    }
4387                }
4388            }
4389            for (auto event : cb_node->writeEventsBeforeWait) {
4390                auto eventNode = dev_data->eventMap.find(event);
4391                if (eventNode != dev_data->eventMap.end()) {
4392                    eventNode->second.write_in_use--;
4393                }
4394            }
4395            for (auto queryStatePair : cb_node->queryToStateMap) {
4396                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4397            }
4398            for (auto eventStagePair : cb_node->eventToStageMap) {
4399                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4400            }
4401
4402            removeInFlightCmdBuffer(dev_data, cb);
4403        }
4404
4405        auto pFence = getFenceNode(dev_data, submission.fence);
4406        if (pFence) {
4407            pFence->state = FENCE_RETIRED;
4408        }
4409
4410        pQueue->submissions.pop_front();
4411        pQueue->seq++;
4412    }
4413
4414    // Roll other queues forward to the highest seq we saw a wait for
4415    for (auto qs : otherQueueSeqs) {
4416        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
4417    }
4418}
4419
4420 // Submit a fence to a queue, marking it as the delimiter of all previously submitted fences and
4421 // untracked work on that queue.
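// A sketch of the arithmetic below: the fence is considered signaled once the queue's sequence number
// reaches seq + submissions.size() + submitCount. E.g. (hypothetical numbers), if seq == 10, two
// submissions are already pending, and the current call adds three more, the fence retires when
// RetireWorkOnQueue rolls the queue forward to seq 15.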
4422static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
4423    pFence->state = FENCE_INFLIGHT;
4424    pFence->signaler.first = pQueue->queue;
4425    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4426}
4427
4428static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4429    bool skip_call = false;
4430    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4431        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4432        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4433                             0, __LINE__, VALIDATION_ERROR_00133, "DS",
4434                             "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
4435                             validation_error_map[VALIDATION_ERROR_00133]);
4436    }
4437    return skip_call;
4438}
4439
4440static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4441    bool skip = false;
4442    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
4443    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4444    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4445        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4446                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4447                        "Command buffer 0x%p was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4448                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4449                        pCB->commandBuffer, pCB->submitCount);
4450    }
4451    // Validate that cmd buffers have been updated
4452    if (CB_RECORDED != pCB->state) {
4453        if (CB_INVALID == pCB->state) {
4454            // Inform app of reason CB invalid
4455            for (auto obj : pCB->broken_bindings) {
4456                const char *type_str = object_type_to_string(obj.type);
4457                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4458                const char *cause_str =
4459                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4460
4461                skip |=
4462                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4463                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4464                            "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
4465                            pCB->commandBuffer, type_str, obj.handle, cause_str);
4466            }
4467        } else {  // Flag error for using CB w/o vkEndCommandBuffer() called
4468            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4469                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4470                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
4471                            call_source);
4472        }
4473    }
4474    return skip;
4475}
4476
4477// Validate that queueFamilyIndices of primary command buffers match this queue
4478// Secondary command buffers were previously validated in vkCmdExecuteCommands().
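// E.g. (hypothetical indices): a primary command buffer allocated from a pool created with
// queueFamilyIndex 0 must not be submitted on a queue obtained from family 1.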
4479static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4480    bool skip_call = false;
4481    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4482    auto queue_state = getQueueState(dev_data, queue);
4483
4484    if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
4485        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4486                             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
4487                             "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
4488                             "0x%p from queue family %d. %s",
4489                             pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
4490                             validation_error_map[VALIDATION_ERROR_00139]);
4491    }
4492
4493    return skip_call;
4494}
4495
4496static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4497    // Track in-use for resources off of primary and any secondary CBs
4498    bool skip_call = false;
4499
4500    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4501    // on device
4502    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4503
4504    skip_call |= validateAndIncrementResources(dev_data, pCB);
4505
4506    if (!pCB->secondaryCommandBuffers.empty()) {
4507        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4508            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4509            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4510            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4511                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4512                skip_call |= log_msg(
4513                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4514                    __LINE__, VALIDATION_ERROR_00135, "DS",
4515                    "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
4516                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
4517                    pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
4518                    validation_error_map[VALIDATION_ERROR_00135]);
4519            }
4520        }
4521    }
4522
4523    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
4524
4525    return skip_call;
4526}
4527
4528static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
4529    bool skip_call = false;
4530
4531    if (pFence) {
4532        if (pFence->state == FENCE_INFLIGHT) {
4533            // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
4534            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4535                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4536                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4537        }
4538
4539        else if (pFence->state == FENCE_RETIRED) {
4540            // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
4541            skip_call |=
4542                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4543                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4544                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4545                        reinterpret_cast<uint64_t &>(pFence->fence));
4546        }
4547    }
4548
4549    return skip_call;
4550}
4551
4552VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4553    bool skip_call = false;
4554    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4555    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4556    std::unique_lock<std::mutex> lock(global_lock);
4557
4558    auto pQueue = getQueueState(dev_data, queue);
4559    auto pFence = getFenceNode(dev_data, fence);
4560    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4561
4562    if (skip_call) {
4563        return VK_ERROR_VALIDATION_FAILED_EXT;
4564    }
4565
4566    // Mark the fence in-use.
4567    if (pFence) {
4568        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4569    }
4570
4571    // Now verify each individual submit
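    // For each submit, the loop below records which semaphore waits chain back to other queues
    // (semaphore_waits) and which semaphores this submit will signal (semaphore_signals); both lists
    // ride along on the queue's submission record so RetireWorkOnQueue can release them later.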
4572    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4573        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4574        vector<SEMAPHORE_WAIT> semaphore_waits;
4575        vector<VkSemaphore> semaphore_signals;
4576        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4577            skip_call |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
4578                                                      VALIDATION_ERROR_00142, VALIDATION_ERROR_00143);
4579            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4580            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4581            if (pSemaphore) {
4582                if (pSemaphore->signaled) {
4583                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4584                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4585                        pSemaphore->in_use.fetch_add(1);
4586                    }
4587                    pSemaphore->signaler.first = VK_NULL_HANDLE;
4588                    pSemaphore->signaled = false;
4589                } else {
4590                    skip_call |=
4591                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4592                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4593                                "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
4594                                reinterpret_cast<const uint64_t &>(semaphore));
4595                }
4596            }
4597        }
4598        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4599            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4600            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4601            if (pSemaphore) {
4602                if (pSemaphore->signaled) {
4603                    skip_call |=
4604                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4605                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4606                                "Queue 0x%p is signaling semaphore 0x%" PRIx64
4607                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4608                                queue, reinterpret_cast<const uint64_t &>(semaphore),
4609                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
4610                } else {
4611                    pSemaphore->signaler.first = queue;
4612                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
4613                    pSemaphore->signaled = true;
4614                    pSemaphore->in_use.fetch_add(1);
4615                    semaphore_signals.push_back(semaphore);
4616                }
4617            }
4618        }
4619
4620        std::vector<VkCommandBuffer> cbs;
4621
4622        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4623            auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
4624            skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
4625            if (cb_node) {
4626                cbs.push_back(submit->pCommandBuffers[i]);
4627                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
4628                    cbs.push_back(secondaryCmdBuffer);
4629                }
4630
4631                cb_node->submitCount++;  // increment submit count
4632                skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
4633                skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
4634                // Potential early exit here as bad object state may crash in delayed function calls
4635                if (skip_call) return result;
4636                // Call submit-time functions to validate/update state
4637                for (auto &function : cb_node->validate_functions) {
4638                    skip_call |= function();
4639                }
4640                for (auto &function : cb_node->eventUpdates) {
4641                    skip_call |= function(queue);
4642                }
4643                for (auto &function : cb_node->queryUpdates) {
4644                    skip_call |= function(queue);
4645                }
4646            }
4647        }
4648
4649        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
4650                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
4651    }
4652
4653    if (pFence && !submitCount) {
4654        // If no submissions, but just dropping a fence on the end of the queue,
4655        // record an empty submission with just the fence, so we can determine
4656        // its completion.
4657        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
4658                                         fence);
4659    }
4660
4661    lock.unlock();
4662    if (!skip_call) result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
4663
4664    return result;
4665}
4666
4667static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
4668    bool skip = false;
4669    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
4670        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4671                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
4672                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
4673                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
4674                        validation_error_map[VALIDATION_ERROR_00611]);
4675    }
4676    return skip;
4677}
4678
4679static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
4680    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
4681    return;
4682}
4683
4684VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4685                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
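    // The locking pattern used throughout this layer: validate under the global lock, drop the lock
    // around the down-chain dispatch call, then re-acquire it to record state once the call succeeds.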
4686    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4687    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4688    std::unique_lock<std::mutex> lock(global_lock);
4689    bool skip = PreCallValidateAllocateMemory(dev_data);
4690    if (!skip) {
4691        lock.unlock();
4692        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4693        lock.lock();
4694        if (VK_SUCCESS == result) {
4695            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
4696        }
4697    }
4698    return result;
4699}
4700
4701 // For given obj node, if it is in use, flag a validation error and return callback result, else return false
4702bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
4703                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
4704    if (dev_data->instance_data->disabled.object_in_use) return false;
4705    bool skip = false;
4706    if (obj_node->in_use.load()) {
4707        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
4708                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
4709                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
4710    }
4711    return skip;
4712}
4713
4714static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
4715    *mem_info = getMemObjInfo(dev_data, mem);
4716    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
4717    if (dev_data->instance_data->disabled.free_memory) return false;
4718    bool skip = false;
4719    if (*mem_info) {
4720        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
4721    }
4722    return skip;
4723}
4724
4725static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
4726    // Clear mem binding for any bound objects
4727    for (auto obj : mem_info->obj_bindings) {
4728        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
4729                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
4730                (uint64_t)mem_info->mem);
4731        switch (obj.type) {
4732            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4733                auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4734                assert(image_state);  // Any destroyed images should already be removed from bindings
4735                image_state->binding.mem = MEMORY_UNBOUND;
4736                break;
4737            }
4738            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4739                auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4740                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
4741                buffer_state->binding.mem = MEMORY_UNBOUND;
4742                break;
4743            }
4744            default:
4745                // Should only have buffer or image objects bound to memory
4746                assert(0);
4747        }
4748    }
4749    // Any bound cmd buffers are now invalid
4750    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
4751    dev_data->memObjMap.erase(mem);
4752}
4753
4754VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4755    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4756    DEVICE_MEM_INFO *mem_info = nullptr;
4757    VK_OBJECT obj_struct;
4758    std::unique_lock<std::mutex> lock(global_lock);
4759    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
4760    if (!skip) {
4761        lock.unlock();
4762        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
4763        lock.lock();
4764        if (mem != VK_NULL_HANDLE) {
4765            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
4766        }
4767    }
4768}
4769
4770 // Validate the given Map memory range. The memory must not already be mapped,
4771 //  and the size of the map range must be:
4772 //  1. Not zero
4773 //  2. Within the size of the memory allocation
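// E.g. (hypothetical values): mapping offset 0x100 with size VK_WHOLE_SIZE of a 0x100-byte allocation
// trips the offset >= allocationSize check below, since no bytes remain past the offset.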
4774static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4775    bool skip_call = false;
4776
4777    if (size == 0) {
4778        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4779                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4780                            "VkMapMemory: Attempting to map memory range of size zero");
4781    }
4782
4783    auto mem_element = my_data->memObjMap.find(mem);
4784    if (mem_element != my_data->memObjMap.end()) {
4785        auto mem_info = mem_element->second.get();
4786        // It is an application error to call VkMapMemory on an object that is already mapped
4787        if (mem_info->mem_range.size != 0) {
4788            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4789                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4790                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4791        }
4792
4793        // Validate that offset + size is within object's allocationSize
4794        if (size == VK_WHOLE_SIZE) {
4795            if (offset >= mem_info->alloc_info.allocationSize) {
4796                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4797                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
4798                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
4799                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
4800                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
4801            }
4802        } else {
4803            if ((offset + size) > mem_info->alloc_info.allocationSize) {
4804                skip_call = log_msg(
4805                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4806                    (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
4807                    "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s", offset,
4808                    size + offset, mem_info->alloc_info.allocationSize, validation_error_map[VALIDATION_ERROR_00628]);
4809            }
4810        }
4811    }
4812    return skip_call;
4813}
4814
4815static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4816    auto mem_info = getMemObjInfo(my_data, mem);
4817    if (mem_info) {
4818        mem_info->mem_range.offset = offset;
4819        mem_info->mem_range.size = size;
4820    }
4821}
4822
4823static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
4824    bool skip_call = false;
4825    auto mem_info = getMemObjInfo(my_data, mem);
4826    if (mem_info) {
4827        if (!mem_info->mem_range.size) {
4828            // Valid Usage: memory must currently be mapped
4829            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4830                                (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
4831                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
4832                                validation_error_map[VALIDATION_ERROR_00649]);
4833        }
4834        mem_info->mem_range.size = 0;
4835        if (mem_info->shadow_copy) {
4836            free(mem_info->shadow_copy_base);
4837            mem_info->shadow_copy_base = 0;
4838            mem_info->shadow_copy = 0;
4839        }
4840    }
4841    return skip_call;
4842}
4843
4844// Guard value for pad data
4845static char NoncoherentMemoryFillValue = 0xb;
4846
4847static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
4848                                     void **ppData) {
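    // For non-coherent memory the layer hands the app a shadow copy padded on both sides with guard
    // bytes (NoncoherentMemoryFillValue) so over- and under-writes can be detected later. A sketch of
    // the shadow allocation built below:
    //
    //   shadow_copy_base .. [alignment slack] .. shadow_copy -> [pad][ app data (size) ][pad]
    //
    // The *ppData returned to the app points at shadow_copy + shadow_pad_size, the start of the app region.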
4849    auto mem_info = getMemObjInfo(dev_data, mem);
4850    if (mem_info) {
4851        mem_info->p_driver_data = *ppData;
4852        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
4853        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4854            mem_info->shadow_copy = 0;
4855        } else {
4856            if (size == VK_WHOLE_SIZE) {
4857                size = mem_info->alloc_info.allocationSize - offset;
4858            }
4859            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
4860            assert(vk_safe_modulo(mem_info->shadow_pad_size,
4861                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
4862            // Ensure start of mapped region reflects hardware alignment constraints
4863            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
4864
4865            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
4866            uint64_t start_offset = offset % map_alignment;
4867            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
4868            mem_info->shadow_copy_base =
4869                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
4870
4871            mem_info->shadow_copy =
4872                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
4873                                         ~(map_alignment - 1)) +
4874                start_offset;
4875            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
4876                                  map_alignment) == 0);
4877
4878            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
4879            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
4880        }
4881    }
4882}
4883
4884 // Verify that the state of a fence being waited on is appropriate. Currently the only check is a
4885 //  warning when the fence is still unsignaled and was never submitted on a queue or during
4886 //  acquire next image, since such a fence has no way to become signaled
4887static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
4888    bool skip_call = false;
4889
4890    auto pFence = getFenceNode(dev_data, fence);
4891    if (pFence) {
4892        if (pFence->state == FENCE_UNSIGNALED) {
4893            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4894                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4895                                 "%s called for fence 0x%" PRIxLEAST64
4896                                 " which has not been submitted on a Queue or during "
4897                                 "acquire next image.",
4898                                 apiCall, reinterpret_cast<uint64_t &>(fence));
4899        }
4900    }
4901    return skip_call;
4902}
4903
4904static void RetireFence(layer_data *dev_data, VkFence fence) {
4905    auto pFence = getFenceNode(dev_data, fence);
4906    if (pFence->signaler.first != VK_NULL_HANDLE) {
4907        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
4908        RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
4909    } else {
4910        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
4911        // the fence as retired.
4912        pFence->state = FENCE_RETIRED;
4913    }
4914}
4915
4916static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
4917    if (dev_data->instance_data->disabled.wait_for_fences) return false;
4918    bool skip = false;
4919    for (uint32_t i = 0; i < fence_count; i++) {
4920        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
4921        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
4922    }
4923    return skip;
4924}
4925
4926static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
4927    // When we know that all fences are complete we can clean/remove their CBs
4928    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
4929        for (uint32_t i = 0; i < fence_count; i++) {
4930            RetireFence(dev_data, fences[i]);
4931        }
4932    }
4933    // NOTE : The alternate case, not handled here, is when only some fences have completed. In
4934    //  that case, for the app to know which fences completed, it will have to call
4935    //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
4936}
4937
4938VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
4939                                             uint64_t timeout) {
4940    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4941    // Verify fence status of submitted fences
4942    std::unique_lock<std::mutex> lock(global_lock);
4943    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
4944    lock.unlock();
4945    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4946
4947    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4948
4949    if (result == VK_SUCCESS) {
4950        lock.lock();
4951        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
4952        lock.unlock();
4953    }
4954    return result;
4955}
4956
4957static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
4958    if (dev_data->instance_data->disabled.get_fence_state) return false;
4959    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
4960}
4961
4962static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
4963
4964VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
4965    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4966    std::unique_lock<std::mutex> lock(global_lock);
4967    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
4968    lock.unlock();
4969    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4970
4971    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
4972    if (result == VK_SUCCESS) {
4973        lock.lock();
4974        PostCallRecordGetFenceStatus(dev_data, fence);
4975        lock.unlock();
4976    }
4977    return result;
4978}
4979
4980static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
4981    // Add queue to tracking set only if it is new
4982    auto result = dev_data->queues.emplace(queue);
4983    if (result.second == true) {
4984        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
4985        queue_state->queue = queue;
4986        queue_state->queueFamilyIndex = q_family_index;
4987        queue_state->seq = 0;
4988    }
4989}
4990
4991VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
4992    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4993    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4994    std::lock_guard<std::mutex> lock(global_lock);
4995
4996    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
4997}
4998
4999static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
5000    *queue_state = getQueueState(dev_data, queue);
5001    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
5002    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
5003}
5004
5005static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
5006    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
5007}
5008
5009VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5010    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5011    QUEUE_STATE *queue_state = nullptr;
5012    std::unique_lock<std::mutex> lock(global_lock);
5013    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
5014    lock.unlock();
5015    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5016    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
5017    if (VK_SUCCESS == result) {
5018        lock.lock();
5019        PostCallRecordQueueWaitIdle(dev_data, queue_state);
5020        lock.unlock();
5021    }
5022    return result;
5023}
5024
5025static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
5026    if (dev_data->instance_data->disabled.device_wait_idle) return false;
5027    bool skip = false;
5028    for (auto &queue : dev_data->queueMap) {
5029        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5030    }
5031    return skip;
5032}
5033
5034static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
5035    for (auto &queue : dev_data->queueMap) {
5036        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5037    }
5038}
5039
5040VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5041    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5042    std::unique_lock<std::mutex> lock(global_lock);
5043    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
5044    lock.unlock();
5045    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5046    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
5047    if (VK_SUCCESS == result) {
5048        lock.lock();
5049        PostCallRecordDeviceWaitIdle(dev_data);
5050        lock.unlock();
5051    }
5052    return result;
5053}
5054
5055static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
5056    *fence_node = getFenceNode(dev_data, fence);
5057    *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
5058    if (dev_data->instance_data->disabled.destroy_fence) return false;
5059    bool skip = false;
5060    if (*fence_node) {
5061        if ((*fence_node)->state == FENCE_INFLIGHT) {
5062            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5063                            (uint64_t)(fence), __LINE__, VALIDATION_ERROR_00173, "DS", "Fence 0x%" PRIx64 " is in use. %s",
5064                            (uint64_t)(fence), validation_error_map[VALIDATION_ERROR_00173]);
5065        }
5066    }
5067    return skip;
5068}
5069
5070static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
5071
5072VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5073    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5074    // Common data objects used pre & post call
5075    FENCE_NODE *fence_node = nullptr;
5076    VK_OBJECT obj_struct;
5077    std::unique_lock<std::mutex> lock(global_lock);
5078    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
5079
5080    if (!skip) {
5081        lock.unlock();
5082        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
5083        lock.lock();
5084        PostCallRecordDestroyFence(dev_data, fence);
5085    }
5086}
5087
5088static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
5089                                            VK_OBJECT *obj_struct) {
5090    *sema_node = getSemaphoreNode(dev_data, semaphore);
5091    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
5092    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
5093    bool skip = false;
5094    if (*sema_node) {
5095        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
5096    }
5097    return skip;
5098}
5099
5100static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
5101
5102VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5103    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5104    SEMAPHORE_NODE *sema_node;
5105    VK_OBJECT obj_struct;
5106    std::unique_lock<std::mutex> lock(global_lock);
5107    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
5108    if (!skip) {
5109        lock.unlock();
5110        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
5111        lock.lock();
5112        PostCallRecordDestroySemaphore(dev_data, semaphore);
5113    }
5114}
5115
5116static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
5117    *event_state = getEventNode(dev_data, event);
5118    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5119    if (dev_data->instance_data->disabled.destroy_event) return false;
5120    bool skip = false;
5121    if (*event_state) {
5122        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
5123    }
5124    return skip;
5125}
5126
5127static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
5128    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
5129    dev_data->eventMap.erase(event);
5130}
5131
5132VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5133    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5134    EVENT_STATE *event_state = nullptr;
5135    VK_OBJECT obj_struct;
5136    std::unique_lock<std::mutex> lock(global_lock);
5137    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
5138    if (!skip) {
5139        lock.unlock();
5140        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5141        lock.lock();
5142        if (event != VK_NULL_HANDLE) {
5143            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
5144        }
5145    }
5146}
5147
5148static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
5149                                            VK_OBJECT *obj_struct) {
5150    *qp_state = getQueryPoolNode(dev_data, query_pool);
5151    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5152    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
5153    bool skip = false;
5154    if (*qp_state) {
5155        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
5156    }
5157    return skip;
5158}
5159
5160static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
5161                                           VK_OBJECT obj_struct) {
5162    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
5163    dev_data->queryPoolMap.erase(query_pool);
5164}
5165
5166VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5167    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5168    QUERY_POOL_NODE *qp_state = nullptr;
5169    VK_OBJECT obj_struct;
5170    std::unique_lock<std::mutex> lock(global_lock);
5171    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
5172    if (!skip) {
5173        lock.unlock();
5174        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5175        lock.lock();
5176        if (queryPool != VK_NULL_HANDLE) {
5177            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
5178        }
5179    }

5180 static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
5181static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
5182                                               uint32_t query_count, VkQueryResultFlags flags,
5183                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
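    // First build a map from each query object to the in-flight command buffers that reference it; the
    // availability checks below consult this map, and the caller reuses it in the post-call record step.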
5184    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
5185        auto cb = getCBNode(dev_data, cmd_buffer);
5186        for (auto query_state_pair : cb->queryToStateMap) {
5187            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
5188        }
5189    }
5190    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
5191    bool skip = false;
5192    for (uint32_t i = 0; i < query_count; ++i) {
5193        QueryObject query = {query_pool, first_query + i};
5194        auto qif_pair = queries_in_flight->find(query);
5195        auto query_state_pair = dev_data->queryToStateMap.find(query);
5196        if (query_state_pair != dev_data->queryToStateMap.end()) {
5197            // Available and in flight
5198            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
5199                query_state_pair->second) {
5200                for (auto cmd_buffer : qif_pair->second) {
5201                    auto cb = getCBNode(dev_data, cmd_buffer);
5202                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
5203                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
5204                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5205                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5206                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5207                                        (uint64_t)(query_pool), first_query + i);
5208                    }
5209                }
5210                // Unavailable and in flight
5211            } else if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
5212                       !query_state_pair->second) {
5213                // TODO : Can there be the same query in use by multiple command buffers in flight?
5214                bool make_available = false;
5215                for (auto cmd_buffer : qif_pair->second) {
5216                    auto cb = getCBNode(dev_data, cmd_buffer);
5217                    make_available |= cb->queryToStateMap[query];
5218                }
5219                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5220                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5221                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5222                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5223                                    (uint64_t)(query_pool), first_query + i);
5224                }
5225                // Unavailable
5226            } else if (query_state_pair != dev_data->queryToStateMap.end() && !query_state_pair->second) {
5227                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
5228                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5229                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5230                                (uint64_t)(query_pool), first_query + i);
5231                // Uninitialized
5232            } else if (query_state_pair == dev_data->queryToStateMap.end()) {
5233                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
5234                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5235                                "Cannot get query results on queryPool 0x%" PRIx64
5236                                " with index %d as data has not been collected for this index.",
5237                                (uint64_t)(query_pool), first_query + i);
5238            }
5239        }
5240    }
5241    return skip;
5242}
5243
5244static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
5245                                              uint32_t query_count,
5246                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
5247    for (uint32_t i = 0; i < query_count; ++i) {
5248        QueryObject query = {query_pool, first_query + i};
5249        auto qif_pair = queries_in_flight->find(query);
5250        auto query_state_pair = dev_data->queryToStateMap.find(query);
5251        if (query_state_pair != dev_data->queryToStateMap.end()) {
5252            // Available and in flight
5253            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
5254                query_state_pair->second) {
5255                for (auto cmd_buffer : qif_pair->second) {
5256                    auto cb = getCBNode(dev_data, cmd_buffer);
5257                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
5258                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
5259                        for (auto event : query_event_pair->second) {
5260                            dev_data->eventMap[event].needsSignaled = true;
5261                        }
5262                    }
5263                }
5264            }
5265        }
5266    }
5267}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}
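
// The wrapper above follows the dispatch pattern used throughout this layer:
//   1. take global_lock and run the PreCallValidate* routine against tracked state,
//   2. drop the lock and, if nothing was flagged, call down the dispatch table,
//   3. re-take the lock and run the PostCallRecord* routine to update tracked state.
// A minimal sketch of that shape ("SomeCommand" is illustrative, not a real entry point):
//
//   VKAPI_ATTR VkResult VKAPI_CALL SomeCommand(VkDevice device /*, ... */) {
//       layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
//       std::unique_lock<std::mutex> lock(global_lock);
//       bool skip = PreCallValidateSomeCommand(dev_data /*, ... */);
//       lock.unlock();
//       if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
//       VkResult result = dev_data->dispatch_table.SomeCommand(device /*, ... */);
//       lock.lock();
//       PostCallRecordSomeCommand(dev_data /*, ... */);
//       return result;
//   }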

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_state = getBufferState(my_data, buffer);
    if (!buffer_state) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_state->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, VALIDATION_ERROR_00676, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer. %s", (uint64_t)(buffer),
                                 validation_error_map[VALIDATION_ERROR_00676]);
        }
    }
    return skip_call;
}

// Return true if the given ranges intersect, else false
// Prerequisite: for both ranges, range->end - range->start > 0. A violation of that should already have
//  been flagged as an error, so it is not re-checked here.
// Padding is applied when one range is linear and the other is non-linear, per bufferImageGranularity.
// In the padded case, if an alias is encountered a validation warning is reported and *skip_call may be
//  set by the callback, so callers should merge in the skip_call value whenever padding is possible.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                                                                " which may indicate a bug. For further info refer to the "
                                                                "Buffer-Image Granularity section of the Vulkan specification. "
                                                                "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                                                                "xhtml/vkspec.html#resources-bufferimagegranularity)",
                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
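
// Worked example of the padded comparison above (hypothetical values): with bufferImageGranularity = 0x400,
// a linear range ending at r1_end = 0x04ff and a non-linear range starting at r2_start = 0x0800 give
//     r1_end   & ~(0x400 - 1) = 0x0400
//     r2_start & ~(0x400 - 1) = 0x0800
// and since 0x0400 < 0x0800 the ranges land in different granularity "pages" and do not intersect. If
// instead r2_start were 0x0500, both sides would mask to 0x0400 and the ranges would be treated as aliased.
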
// Simplified rangesIntersect that calls the above function to check range1 for intersection with an [offset, end] span
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential validation-error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool = false;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, set all ranges valid that intersect [offset-end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory with given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
//  and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so they can be copied into the final map entry below. This can't be done inside the loop because the
    // final pointer doesn't exist yet; inserting into the map before the loop to obtain it would make the loop check the new
    // range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}
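
// Example of the alias bookkeeping above (hypothetical handles): if buffer B already occupies bytes
// [0x0000, 0x1fff] of a memory object and an image I is now bound at offset 0x1000 with size 0x1000, the
// new range [0x1000, 0x1fff] intersects B's range, so after insertion
//     bound_ranges[I].aliases contains &bound_ranges[B], and
//     bound_ranges[B].aliases contains &bound_ranges[I].
// If one range is linear and the other is not, rangesIntersect() will also have emitted the
// MEMTRACK_INVALID_ALIASING warning.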

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove the MEMORY_RANGE struct for the given handle from mem_info's bound_ranges
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
                                         VK_OBJECT *obj_struct) {
    *buffer_state = getBufferState(dev_data, buffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer) return false;
    bool skip = false;
    if (*buffer_state) {
        skip |= validateIdleBuffer(dev_data, buffer);
    }
    return skip;
}

static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    dev_data->bufferMap.erase(buffer_state->buffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        if (buffer != VK_NULL_HANDLE) {
            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
        }
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer_view) return false;
    bool skip = false;
    if (*buffer_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        if (bufferView != VK_NULL_HANDLE) {
            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        if (image != VK_NULL_HANDLE) {
            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
        }
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
                    "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                    "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
                    funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
    }
    return skip_call;
}
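
// Worked example of the bitmask test above: if vkGetBufferMemoryRequirements() reported
// memoryTypeBits = 0x0005 (types 0 and 2 allowed) and the memory object was allocated with
// memoryTypeIndex = 1, then (1 << 1) & 0x0005 == 0 and the error fires; an allocation from
// memory type 0 or 2 would pass.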

VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        if (!buffer_state->memory_requirements_checked) {
            // The spec has no explicit requirement to call vkGetBufferMemoryRequirements() before vkBindBufferMemory(),
            //  but one is implied: the memory being bound must conform to the VkMemoryRequirements that
            //  vkGetBufferMemoryRequirements() would return.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                                 buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
                                             VALIDATION_ERROR_00797);
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_02174, "DS",
                                 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
                                 " but must be an integer multiple of the "
                                 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                                 memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
        }
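
        // Worked example of the alignment check above (hypothetical values): with
        // requirements.alignment = 0x100, a memoryOffset of 0x200 passes (0x200 % 0x100 == 0),
        // while a memoryOffset of 0x180 fails (0x180 % 0x100 == 0x80) and triggers
        // VALIDATION_ERROR_02174.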

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
                                             "minStorageBufferOffsetAlignment"};

        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
        // clang-format off
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_00794, VALIDATION_ERROR_00795,
                                                                 VALIDATION_ERROR_00796 };
        // clang-format on

        // Keep this array in sync with usage_list, memory_type, offset_name, and msgCode above
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
                                                    " but must be a multiple of "
                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
                                                       VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
                                            VK_OBJECT *obj_struct) {
    *image_view_state = getImageViewState(dev_data, image_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_image_view) return false;
    bool skip = false;
    if (*image_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
    }
    return skip;
}

static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
                                           VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
    dev_data->imageViewMap.erase(image_view);
}

VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        if (imageView != VK_NULL_HANDLE) {
            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
                                               const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        if (pipeline != VK_NULL_HANDLE) {
            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = getSamplerState(dev_data, sampler);
    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
    if (dev_data->instance_data->disabled.destroy_sampler) return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        if (sampler != VK_NULL_HANDLE) {
            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
        }
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        if (descriptorPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
        }
    }
}
// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
//  A secondary command buffer is only flagged if its primary is also in-flight; if the primary has
//  completed, the caller is expected to remove the secondary from the global in-flight set.
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB or secondary where primary is also in-flight is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
        }
    }
    return skip_call;
}
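
// Example of the rule above (hypothetical objects): freeing a secondary command buffer S that is still in
// globalInFlightCmdBuffers is allowed when its primary P has completed (the caller then erases S from the
// set), but is flagged as an error if P itself is also still in the in-flight set.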

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
        }
    }
    return skip_call;
}

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before freeing it
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
        }
    }

    if (skip_call) return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap (guard against an unknown pool handle)
        if (pPool) {
            pPool->commandBuffers.remove(pCommandBuffers[i]);
        }
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_01006, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01006]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}

static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
    *cp_state = getCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
    bool skip = false;
    if (*cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Remove the command pool from commandPoolMap only after all of its command buffers have been removed from commandBufferMap
    clearCommandBuffersInFlight(dev_data, cp_state);
    for (auto cb : cp_state->commandBuffers) {
        clear_cmd_buf_and_mem_references(dev_data, cb);
        auto cb_node = getCBNode(dev_data, cb);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
        delete cb_node;                        // delete CB info structure
    }
    dev_data->commandPoolMap.erase(pool);
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    COMMAND_POOL_NODE *cp_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        if (commandPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    // Guard against an unknown pool handle before walking its command buffers
    if (pPool) {
        skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
    }
    lock.unlock();

    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result && pPool) {
        lock.lock();
        clearCommandBuffersInFlight(dev_data, pPool);
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = getFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
                                 "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
                                 validation_error_map[VALIDATION_ERROR_00183]);
        }
    }
    lock.unlock();

    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = getFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
        }
        cb_node->state = CB_INVALID;
        cb_node->broken_bindings.push_back(obj);
    }
}

static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
    }
    return skip;
}

static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                             VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
        lock.lock();
        if (framebuffer != VK_NULL_HANDLE) {
            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
        }
    }
}

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = getRenderPassState(dev_data, render_pass);
    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        if (renderPass != VK_NULL_HANDLE) {
            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO: Add check for VALIDATION_ERROR_00658
    // TODO: Add check for VALIDATION_ERROR_00666
    // TODO: Add check for VALIDATION_ERROR_00667
    // TODO: Add check for VALIDATION_ERROR_00668
    // TODO: Add check for VALIDATION_ERROR_00669
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when that data needs to remain valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
    }
    return result;
}

static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
    if (buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()", VALIDATION_ERROR_02522);
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        skip_call |= ValidateBufferUsageFlags(
            dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
            VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
        lock.unlock();
    }
    return result;
}
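
// For reference, a VkBufferViewCreateInfo that satisfies the checks in PreCallValidateCreateBufferView
// (illustrative values; the buffer must have memory bound and must have been created with texel-buffer usage):
//
//   VkBufferViewCreateInfo view_info = {};
//   view_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
//   view_info.buffer = buffer;  // created with VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT
//   view_info.format = VK_FORMAT_R32G32B32A32_SFLOAT;
//   view_info.offset = 0;
//   view_info.range = VK_WHOLE_SIZE;
//   vkCreateBufferView(device, &view_info, nullptr, &view);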

// Access helper functions for external modules
PFN_vkGetPhysicalDeviceFormatProperties GetFormatPropertiesPointer(core_validation::layer_data *device_data) {
    return device_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties;
}

PFN_vkGetPhysicalDeviceImageFormatProperties GetImageFormatPropertiesPointer(core_validation::layer_data *device_data) {
    return device_data->instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties;
}

VkPhysicalDevice GetPhysicalDevice(core_validation::layer_data *device_data) { return device_data->physical_device; }

const debug_report_data *GetReportData(core_validation::layer_data *device_data) { return device_data->report_data; }

const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
    return &device_data->phys_dev_props;
}

const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }

std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
    return &device_data->imageMap;
}

std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
    return &device_data->imageSubresourceMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
    return &device_data->imageLayoutMap;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
    if (!skip) {
        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
    }
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
    }
    return result;
}
6234
6235// For the given format verify that the aspect masks make sense
static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
                                    const char *func_name) {
    bool skip = false;
    if (vk_format_is_color(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_depth_and_stencil(format)) {
        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Depth/stencil image formats must have "
                            "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
                            "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
                            "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_depth_only(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_stencil_only(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    }
    return skip;
}
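// Illustrative outcomes of the checks above (assuming the usual format classifications):
//   VK_FORMAT_R8G8B8A8_UNORM    + COLOR_BIT              -> OK
//   VK_FORMAT_R8G8B8A8_UNORM    + COLOR_BIT | DEPTH_BIT  -> error: only COLOR_BIT is allowed for color formats
//   VK_FORMAT_D24_UNORM_S8_UINT + DEPTH_BIT              -> OK: a non-empty subset of {DEPTH, STENCIL} is accepted
//   VK_FORMAT_D16_UNORM         + STENCIL_BIT            -> error: depth-only formats require DEPTH_BIT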

bool ValidateImageSubrangeLevelLayerCounts(layer_data *dev_data, const VkImageSubresourceRange &subresourceRange,
                                           const char *func_name) {
    bool skip = false;
    if (subresourceRange.levelCount == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00768, "IMAGE", "%s called with 0 in subresourceRange.levelCount. %s", func_name,
                        validation_error_map[VALIDATION_ERROR_00768]);
    }
    if (subresourceRange.layerCount == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00769, "IMAGE", "%s called with 0 in subresourceRange.layerCount. %s", func_name,
                        validation_error_map[VALIDATION_ERROR_00769]);
    }
    return skip;
}

static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
    bool skip = false;
    IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
    if (image_state) {
        skip |= ValidateImageUsageFlags(
            dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            false, -1, "vkCreateImageView()",
            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
        skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()", VALIDATION_ERROR_02524);
        // Checks imported from image layer
        if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
            std::stringstream ss;
            ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
               << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
        }
        if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
            std::stringstream ss;
            ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
               << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
        }
        // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
        skip |= ValidateImageSubrangeLevelLayerCounts(dev_data, create_info->subresourceRange, "vkCreateImageView()");

        VkImageCreateFlags image_flags = image_state->createInfo.flags;
        VkFormat image_format = image_state->createInfo.format;
        VkFormat view_format = create_info->format;
        VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;

        // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
        if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
            // Format MUST be compatible with (in the same format compatibility class as) the format the image was created with
            if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
                std::stringstream ss;
                ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
                   << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ") format "
                   << string_VkFormat(image_format) << ".  Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
                   << "can support ImageViews with differing formats but they must be in the same compatibility class.";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
                                validation_error_map[VALIDATION_ERROR_02171]);
            }
        } else {
            // Format MUST be IDENTICAL to the format the image was created with
            if (image_format != view_format) {
                std::stringstream ss;
                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
                   << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
                   << ".  Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
                                validation_error_map[VALIDATION_ERROR_02172]);
            }
        }

        // Validate correct image aspect bits for desired formats and format consistency
        skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
    }
    return skip;
}

static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange,
                                 getImageState(dev_data, create_info->image));
}

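// CreateImageView below follows the layer's standard interception pattern: validate under the global
// lock (PreCallValidate*), release the lock to call down the dispatch chain, then re-acquire it to
// record the new state (PostCallRecord*) only if the driver call succeeded.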
VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
                                                const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
                                                    void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
                                                   const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any attachment used by this pipeline both enables blending and uses a constant blend factor, record that blend
    // constants are in use
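    // Note: the range test below relies on the four constant blend factors
    // (VK_BLEND_FACTOR_CONSTANT_COLOR .. VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)
    // occupying contiguous values in the VkBlendFactor enum.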
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}

static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
    bool skip = false;
    instance_layer_data *instance_data =
        get_my_data_ptr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);

    for (uint32_t i = 0; i < count; i++) {
        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
        if (create_infos[i].pVertexInputState != NULL) {
            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
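                // For example, a depth format such as VK_FORMAT_D16_UNORM would not normally advertise
                // VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT, so using it as a vertex attribute format should
                // trigger the error below (actual support is reported by the ICD).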
                VkFormatProperties properties;
                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
                    skip |= log_msg(
                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        __LINE__, VALIDATION_ERROR_01413, "IMAGE",
                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
                        "(%s) is not a supported vertex buffer format. %s",
                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
                }
            }
        }
    }
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skip = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pipe_state(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

    for (i = 0; i < count; i++) {
        pipe_state[i] = new PIPELINE_STATE;
        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
    }
    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);

    if (skip) {
        for (i = 0; i < count; i++) {
            delete pipe_state[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pipe_state[i];
        } else {
            pipe_state[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pPipeState(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeState[i] = new PIPELINE_STATE;
        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
                                           dev_data->shaderModuleMap);
        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeState[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pPipeState[i];
        } else {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
    }
    return result;
}

static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
}

static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    // TODO: Convert this to unique_ptr to avoid leaks
    dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
        }
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip_call = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
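    // Worked example (hypothetical limits): with maxPushConstantsSize = 128, offset = 120, size = 16,
    // the first test passes (120 < 128) but the second fails (16 > 128 - 120), so the range is
    // rejected without ever computing offset + size, which could wrap around in uint32_t.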
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00877, "DS",
                            "%s call has push constants index %u with offset %u that "
                            "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00880, "DS",
                            "%s call has push constants index %u with offset %u and size %u that "
                            "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00991, "DS",
                            "%s call has push constants index %u with offset %u that "
                            "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00992, "DS",
                            "%s call has push constants index %u with offset %u and size %u that "
                            "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
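    // ((size & 0x3) != 0) isolates the low two bits, so a nonzero result means size is not 4-byte aligned.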
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00878, "DS",
                                     "%s call has push constants index %u with "
                                     "size %u. Size must be greater than zero. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
            }
            if (size & 0x3) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00879, "DS",
                                     "%s call has push constants index %u with "
                                     "size %u. Size must be a multiple of 4. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_01000, "DS",
                                     "%s call has push constants index %u with "
                                     "size %u. Size must be greater than zero. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
            }
            if (size & 0x3) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00990, "DS",
                                     "%s call has push constants index %u with "
                                     "size %u. Size must be a multiple of 4. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_02521, "DS",
                                 "%s call has push constants index %u with "
                                 "offset %u. Offset must be a multiple of 4. %s",
                                 caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00989, "DS",
                                 "%s call has push constants with "
                                 "offset %u. Offset must be a multiple of 4. %s",
                                 caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : Add checks for VALIDATION_ERRORS 865-871
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00882, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
                                 validation_error_map[VALIDATION_ERROR_00882]);
        }
    }
    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;

    // Each range has been validated individually.  Now check for overlap between ranges.
    // There's no explicit Valid Usage language against overlap, so issue a warning instead of an error.
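    // Example: ranges [0, 16) and [8, 24) overlap -- minA (0) <= minB (8) and maxA (16) > minB (8) --
    // while [0, 16) and [16, 32) do not, since the ranges are half-open.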
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                     "vkCreatePipelineLayout() call has push constants with "
                                     "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
                                     i, minA, maxA, j, minB, maxB);
            }
        }
    }

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // Need to do anything if pool create fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_00928
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
    bool skip_call = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
        }
    }

    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                             validation_error_map[VALIDATION_ERROR_00922]);
    }
    return skip_call;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
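    // For example, freeing a set whose layout has bindings {UNIFORM_BUFFER x2, COMBINED_IMAGE_SAMPLER x1}
    // returns 2 and 1 descriptors to the respective availableDescriptorTypeCount entries, in addition
    // to the set slot returned to availableSets above.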
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            freeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a Proof-of-concept for core validation architecture
//  Really we'll want to break out these functions to separate files but
//  keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // NOTE: UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below.
    // Make call(s) that validate state, but don't perform state updates in this function.
    // Since there is no single DescriptorSet instance here, use a helper in the cvdescriptorset
    //  namespace that parses the params and makes calls into the specific class instances.
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                         pDescriptorCopies);
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    addCommandBufferBinding(&fb_state->cb_bindings,
                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
                            cb_state);
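    // Bind the render pass the framebuffer was created against once, up front; this also covers
    // framebuffers that were created with no attachments.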
    auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
    if (rp_state) {
        addCommandBufferBinding(
            &rp_state->cb_bindings,
            {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
    }
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00104, "MEM",
                        "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
                        "You must check command buffer fence before this call. %s",
                        commandBuffer, validation_error_map[VALIDATION_ERROR_00104]);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
                            validation_error_map[VALIDATION_ERROR_00106]);
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    // Object_tracker makes sure these objects are valid
                    assert(pInfo->renderPass);
                    assert(pInfo->framebuffer);
                    string errorString = "";
                    auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
                    if (framebuffer) {
                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                             getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                             errorString)) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                                 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
                                                 "vkBeginCommandBuffer(): Secondary Command "
                                                 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
                                                 ") is incompatible w/ framebuffer "
                                                 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
                                                 commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                                 reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                                 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass),
                                                 errorString.c_str(), validation_error_map[VALIDATION_ERROR_00112]);
                        }
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                         __LINE__, VALIDATION_ERROR_00107, "DS",
                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
                                         "support precise occlusion queries. %s",
                                         commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00111, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
                            "that is less than the number of subpasses (%d). %s",
                            commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
                            validation_error_map[VALIDATION_ERROR_00111]);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
                        ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
                        commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = getCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command buffer inheriting render pass state, update the items we should inherit
7126            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7127                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7128                cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
7129                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
7130                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
7131                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
7132            }
7133        }
7134    }
7135    lock.unlock();
7136    if (skip_call) {
7137        return VK_ERROR_VALIDATION_FAILED_EXT;
7138    }
7139    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
7140
7141    return result;
7142}
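// Illustrative usage sketch (not layer code; names are hypothetical): the implicit-reset
// path above requires the command pool to allow per-buffer resets, e.g.:
//
//     VkCommandPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//     pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     // Without that flag, calling vkBeginCommandBuffer() on an already-RECORDED buffer
//     // triggers VALIDATION_ERROR_00105 above instead of an implicit reset.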

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
        }
        skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END);
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00124, "DS",
                                 "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
                                 (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        // Guard on pCB: an unknown command buffer handle yields a null node above
        if ((VK_SUCCESS == result) && pCB) {
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}
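// Illustrative usage sketch (not layer code): the in-progress-query check above fires for
// a recording sequence that never closes a query, e.g.:
//
//     vkCmdBeginQuery(cb, query_pool, 0 /*query*/, 0 /*flags*/);
//     // ... no matching vkCmdEndQuery(cb, query_pool, 0) ...
//     vkEndCommandBuffer(cb);  // reports VALIDATION_ERROR_00124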

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    // Guard on pCB: avoid dereferencing a null node for an unknown command buffer handle
    if (pCB) {
        VkCommandPool cmdPool = pCB->createInfo.commandPool;
        auto pPool = getCommandPoolNode(dev_data, cmdPool);
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                        commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
        }
        skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
    }
    lock.unlock();
    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE);
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
        }
        // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596

        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            // Only track bindings when the pipeline actually exists; this also avoids
            // dereferencing a null pipe_state when an unknown pipeline handle is passed in
            addCommandBufferBinding(&pipe_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(
                        &rp_state->cb_bindings,
                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT},
                        cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
                            validation_error_map[VALIDATION_ERROR_00600]);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}
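// Illustrative usage sketch (not layer code): the DRAWSTATE_INVALID_RENDERPASS_CMD case
// above corresponds to binding a compute pipeline while a render pass is active, e.g.:
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);  // flagged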

VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
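        // The mask below records which viewport slots have been set dynamically. Worked
        // example: firstViewport = 1, viewportCount = 2 gives ((1u << 2) - 1u) << 1 =
        // 0b11 << 1 = 0b110, marking viewports 1 and 2 as set.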
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
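        // scissorMask below uses the same slot-marking scheme as viewportMask above.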
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
                                 "vkCmdSetLineWidth() called but the bound pipeline was created without the "
                                 "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state; this is undefined behavior and the new "
                                 "line width may be ignored. %s",
                                 validation_error_map[VALIDATION_ERROR_01476]);
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE);
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            auto pipeline_layout = getPipelineLayout(dev_data, layout);
            for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
                cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[set_idx]);
                if (descriptor_set) {
                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                         __LINE__, DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[set_idx], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                             __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "Descriptor Set 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[set_idx]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, set_idx + firstSet,
                                                         errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                             __LINE__, VALIDATION_ERROR_00974, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
                                             set_idx, set_idx + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str(),
                                             validation_error_map[VALIDATION_ERROR_00974]);
                    }

                    auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                        __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        set_idx, (uint64_t)pDescriptorSets[set_idx], descriptor_set->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else {  // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                                if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                         __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                         "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[set_idx]);
                }
                skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) {  // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors. %s",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
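// Illustrative usage sketch (not layer code; values are hypothetical): per the rules
// above, a set with two dynamic uniform buffers needs exactly two offsets, each a
// multiple of minUniformBufferOffsetAlignment (often 256; query the device limit):
//
//     uint32_t dynamic_offsets[2] = {0, 256};
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0 /*firstSet*/,
//                             1, &set, 2, dynamic_offsets);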

VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buffer_state = getBufferState(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
        VkDeviceSize offset_align = 0;
        switch (indexType) {
            case VK_INDEX_TYPE_UINT16:
                offset_align = 2;
                break;
            case VK_INDEX_TYPE_UINT32:
                offset_align = 4;
                break;
            default:
                // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
                break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
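// Worked example for the alignment check above: offset 6 is valid for
// VK_INDEX_TYPE_UINT16 (6 % 2 == 0) but invalid for VK_INDEX_TYPE_UINT32 (6 % 4 != 0).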

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buffer_state = getBufferState(dev_data, pBuffers[i]);
            assert(buffer_state);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

// Expects global_lock to be held by caller
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state) continue;

        auto image_state = getImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = getBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
}
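// Note: the lambdas queued above are deliberately not evaluated at record time; they run
// when the command buffer is validated later (for example at submit), so "this image or
// buffer now holds valid data" only takes effect if the recorded work actually executes.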

// Generic function to handle validation for all CmdDraw* type functions
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller,
                                UNIQUE_VALIDATION_ERROR_CODE msg_code, UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
    bool skip = false;
    *cb_state = getCBNode(dev_data, cmd_buffer);
    if (*cb_state) {
        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
    }
    return skip;
}
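// Note on the ternary above: graphics draws must be recorded inside a render pass, so
// outsideRenderPass() reports when they are not; compute dispatches must be recorded
// outside one, so insideRenderPass() reports when they are not.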

// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                           CMD_TYPE cmd_type) {
    UpdateDrawState(dev_data, cb_state, bind_point);
    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
    UpdateCmdBufferLastCmd(dev_data, cb_state, cmd_type);
}

// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                   CMD_TYPE cmd_type, DRAW_TYPE draw_type) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
    updateResourceTrackingOnDraw(cb_state);
    cb_state->drawCount[draw_type]++;
}

static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VALIDATION_ERROR_01365,
                               VALIDATION_ERROR_02203);
}

static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW, DRAW);
}

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
        lock.lock();
        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}
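// The remaining CmdDraw*/CmdDispatch* entry points follow the same pattern as CmdDraw
// above: validate under global_lock, release the lock around the down-chain call, then
// re-acquire it to record post-call state.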

static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VALIDATION_ERROR_01372,
                               VALIDATION_ERROR_02216);
}

static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED, DRAW_INDEXED);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                              "vkCmdDrawIndexed()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
        lock.lock();
        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                           const char *caller) {
    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
                                    VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
    *buffer_state = getBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
    return skip;
}

static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                          BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT, DRAW_INDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                           uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                               &buffer_state, "vkCmdDrawIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                                  BUFFER_STATE **buffer_state, const char *caller) {
    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
                                    VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
    *buffer_state = getBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
    return skip;
}

static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                                 BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT, DRAW_INDEXED_INDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                  uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VALIDATION_ERROR_01562,
                               VALIDATION_ERROR_UNDEFINED);
}

static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip =
        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
        lock.lock();
        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                               BUFFER_STATE **buffer_state, const char *caller) {
    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller,
                                    VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
    *buffer_state = getBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
    return skip;
}

static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                              BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
        lock.lock();
        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_buff_state && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02531);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02532);
        // Update bindings between buffers and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
    } else {
        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
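// Note: of the two deferred lambdas above, the first checks that the source buffer's
// memory holds valid data (a read) and the second marks the destination memory valid
// (a write); both run when the command buffer is validated later, not at record time.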

// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
    bool result = true;
    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
        (extent->depth != other_extent->depth)) {
        result = false;
    }
    return result;
}

// Returns the image extent of a specific subresource.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
    const uint32_t mip = subresource->mipLevel;
    VkExtent3D extent = img->createInfo.extent;
    extent.width = std::max(1U, extent.width >> mip);
    extent.height = std::max(1U, extent.height >> mip);
    extent.depth = std::max(1U, extent.depth >> mip);
    return extent;
}
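// Worked example: for a 16x8x1 image, mipLevel 2 yields (16 >> 2, 8 >> 2, 1) = (4, 2, 1);
// at mipLevel 5 the std::max clamp keeps each dimension at least 1, giving (1, 1, 1).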

// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentZero(const VkExtent3D *extent) {
    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}

// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
    VkExtent3D granularity = {0, 0, 0};
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (vk_format_is_compressed(img->createInfo.format)) {
            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
            granularity.width *= block_size.width;
            granularity.height *= block_size.height;
        }
    }
    return granularity;
}
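// Worked example: with a queue family granularity of (1, 1, 1) and a BC-compressed image
// (4x4 texel blocks), the scaled granularity becomes (4, 4, 1); depth is left unscaled
// because block-compressed blocks are two-dimensional.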
7964
7965// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
7966static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
7967    bool valid = true;
7968    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
7969        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
7970        valid = false;
7971    }
7972    return valid;
7973}
7974
7975// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
7976static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
7977                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
7978    bool skip = false;
7979    VkExtent3D offset_extent = {};
7980    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
7981    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
7982    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
7983    if (IsExtentZero(granularity)) {
7984        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
7985        if (IsExtentZero(&offset_extent) == false) {
7986            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7987                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7988                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
7989                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7990                            function, i, member, offset->x, offset->y, offset->z);
7991        }
7992    } else {
7993        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
7994        // integer multiples of the image transfer granularity.
7995        if (IsExtentAligned(&offset_extent, granularity) == false) {
7996            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7997                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7998                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
7999                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
8000                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
8001                            granularity->depth);
8002        }
8003    }
8004    return skip;
8005}
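
// Example of the rule enforced above (illustrative sketch): with a granularity
// of (8, 8, 1), an offset of (4, 4, 0) would be rejected while (8, 16, 0) is
// accepted. The offset is folded into an extent of absolute values and tested
// for alignment exactly as CheckItgOffset does.
static inline bool ExampleOffsetGranularityCheck() {
    VkOffset3D offset = {8, 16, 0};
    VkExtent3D granularity = {8, 8, 1};
    VkExtent3D as_extent = {static_cast<uint32_t>(abs(offset.x)), static_cast<uint32_t>(abs(offset.y)),
                            static_cast<uint32_t>(abs(offset.z))};
    return IsExtentAligned(&as_extent, &granularity);  // true for (8, 16, 0)
}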
8006
8007// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
8008static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
8009                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
8010                                  const uint32_t i, const char *function, const char *member) {
8011    bool skip = false;
8012    if (IsExtentZero(granularity)) {
8013        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
8014        // subresource extent.
8015        if (IsExtentEqual(extent, subresource_extent) == false) {
8016            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8017                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8018                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
8019                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8020                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
8021                            subresource_extent->height, subresource_extent->depth);
8022        }
8023    } else {
8024        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be
8025        // integer multiples of the image transfer granularity, or the offset + extent dimensions must always match the image
8026        // subresource extent dimensions.
8027        VkExtent3D offset_extent_sum = {};
8028        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
8029        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
8030        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
8031        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
8032            skip |=
8033                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8034                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8035                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be integer multiples of this command buffer's "
8036                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
8037                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
8038                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
8039                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
8040                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
8041        }
8042    }
8043    return skip;
8044}
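
// Example of the escape hatch above (illustrative sketch): with a granularity
// of (16, 16, 1) and a 60x60x1 subresource, a copy of extent (12, 60, 1) at
// offset (48, 0, 0) is legal even though 12 is not a multiple of 16, because
// offset + extent reaches the subresource edge.
static inline bool ExampleExtentEdgeCase() {
    VkExtent3D granularity = {16, 16, 1};
    VkExtent3D subresource_extent = {60, 60, 1};
    VkOffset3D offset = {48, 0, 0};
    VkExtent3D extent = {12, 60, 1};
    VkExtent3D sum = {static_cast<uint32_t>(offset.x) + extent.width, static_cast<uint32_t>(offset.y) + extent.height,
                      static_cast<uint32_t>(offset.z) + extent.depth};
    // Not aligned, but offset + extent matches the subresource extent, so no error is emitted.
    return !IsExtentAligned(&extent, &granularity) && IsExtentEqual(&sum, &subresource_extent);  // true
}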
8045
8046// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
8047static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
8048                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8049    bool skip = false;
8050    if (vk_safe_modulo(value, granularity) != 0) {
8051        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8052                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8053                        "%s: pRegion[%d].%s (%d) must be an integer multiple of this command buffer's queue family image "
8054                        "transfer granularity width (%d).",
8055                        function, i, member, value, granularity);
8056    }
8057    return skip;
8058}
8059
8060// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
8061static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
8062                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8063    bool skip = false;
8064    if (vk_safe_modulo(value, granularity) != 0) {
8065        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8066                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8067                        "%s: pRegion[%d].%s (%" PRIdLEAST64
8068                        ") must be an integer multiple of this command buffer's queue family image transfer "
8069                        "granularity width (%d).",
8070                        function, i, member, value, granularity);
8071    }
8072    return skip;
8073}
8074
8075// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
8076static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8077                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
8078                                                                    const uint32_t i, const char *function) {
8079    bool skip = false;
8080    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8081    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8082    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8083    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8084    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8085                           "extent");
8086    return skip;
8087}
8088
8089// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8090static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8091                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
8092                                                                          const uint32_t i, const char *function) {
8093    bool skip = false;
8094    if (vk_format_is_compressed(img->createInfo.format) == true) {
8095        // TODO: Add granularity checking for compressed formats
8096
8097        // bufferRowLength must be a multiple of the compressed texel block width
8098        // bufferImageHeight must be a multiple of the compressed texel block height
8099        // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
8100        // bufferOffset must be a multiple of the compressed texel block size in bytes
8101        // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
8102        //     must equal the image subresource width
8103        // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
8104        //     must equal the image subresource height
8105        // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
8106        //     must equal the image subresource depth
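
        // A possible shape for the first two checks above (illustrative sketch,
        // kept disabled; assumes vk_format_compressed_block_size() returns the
        // texel block dimensions for this format and that message plumbing would
        // mirror the non-compressed path below):
#if 0
        auto block_size = vk_format_compressed_block_size(img->createInfo.format);
        if (vk_safe_modulo(region->bufferRowLength, block_size.width) != 0) {
            skip |= true;  // would log: bufferRowLength is not a multiple of the texel block width
        }
        if (vk_safe_modulo(region->bufferImageHeight, block_size.height) != 0) {
            skip |= true;  // would log: bufferImageHeight is not a multiple of the texel block height
        }
#endif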
8107    } else {
8108        VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8109        skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8110        skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
8111        skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
8112        skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8113        VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8114        skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8115                               function, "imageExtent");
8116    }
8117    return skip;
8118}
8119
8120VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8121                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
8122                                        const VkImageCopy *pRegions) {
8123    bool skip_call = false;
8124    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8125    std::unique_lock<std::mutex> lock(global_lock);
8126
8127    auto cb_node = getCBNode(dev_data, commandBuffer);
8128    auto src_image_state = getImageState(dev_data, srcImage);
8129    auto dst_image_state = getImageState(dev_data, dstImage);
8130    if (cb_node && src_image_state && dst_image_state) {
8131
8132        skip_call = PreCallValidateCmdCopyImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
8133
8134        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02533);
8135        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02534);
8136        // Update bindings between images and cmd buffer
8137        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8138        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8139        // Validate that SRC & DST images have correct usage flags set
8140        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8141                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8142        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8143                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8144        std::function<bool()> function = [=]() {
8145            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
8146        };
8147        cb_node->validate_functions.push_back(function);
8148        function = [=]() {
8149            SetImageMemoryValid(dev_data, dst_image_state, true);
8150            return false;
8151        };
8152        cb_node->validate_functions.push_back(function);
8153
8154        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8155        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
8156        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
8157        for (uint32_t i = 0; i < regionCount; ++i) {
8158            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout,
8159                                                 VALIDATION_ERROR_01180);
8160            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout,
8161                                               VALIDATION_ERROR_01183);
8162            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8163                                                                          "vkCmdCopyImage()");
8164        }
8165    } else {
8166        assert(0);
8167    }
8168    lock.unlock();
8169    if (!skip_call)
8170        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8171                                              pRegions);
8172}
8173
8174// Validate that an image's sampleCount matches the requirement for a specific API call
8175bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
8176                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
8177    bool skip = false;
8178    if (image_state->createInfo.samples != sample_count) {
8179        skip =
8180            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8181                    reinterpret_cast<uint64_t &>(image_state->image), 0, msgCode, "DS",
8182                    "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
8183                    reinterpret_cast<uint64_t &>(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
8184                    string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
8185    }
8186    return skip;
8187}
8188
8189VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8190                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
8191                                        const VkImageBlit *pRegions, VkFilter filter) {
8192    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8193    std::unique_lock<std::mutex> lock(global_lock);
8194
8195    auto cb_node = getCBNode(dev_data, commandBuffer);
8196    auto src_image_state = getImageState(dev_data, srcImage);
8197    auto dst_image_state = getImageState(dev_data, dstImage);
8198
8199    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);
8200
8201    if (!skip) {
8202        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
8203        lock.unlock();
8204        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8205                                              pRegions, filter);
8206    }
8207}
8208
8209VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
8210                                                VkImageLayout dstImageLayout, uint32_t regionCount,
8211                                                const VkBufferImageCopy *pRegions) {
8212    bool skip_call = false;
8213    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8214    std::unique_lock<std::mutex> lock(global_lock);
8215
8216    auto cb_node = getCBNode(dev_data, commandBuffer);
8217    auto src_buff_state = getBufferState(dev_data, srcBuffer);
8218    auto dst_image_state = getImageState(dev_data, dstImage);
8219    if (cb_node && src_buff_state && dst_image_state) {
8220        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
8221                                              "vkCmdCopyBufferToImage(): dstImage", VALIDATION_ERROR_01232);
8222        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02535);
8223        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02536);
8224        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
8225        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8226        skip_call |=
8227            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
8228                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8229        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8230                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8231        std::function<bool()> function = [=]() {
8232            SetImageMemoryValid(dev_data, dst_image_state, true);
8233            return false;
8234        };
8235        cb_node->validate_functions.push_back(function);
8236        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
8237        cb_node->validate_functions.push_back(function);
8238
8239        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8240        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE);
8241        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242);
8242        for (uint32_t i = 0; i < regionCount; ++i) {
8243            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout,
8244                                               VALIDATION_ERROR_01234);
8245            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8246                                                                                "vkCmdCopyBufferToImage()");
8247        }
8248    } else {
8249        assert(0);
8250    }
8251    lock.unlock();
8252    if (!skip_call)
8253        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
8254}
8255
8256VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8257                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8258    bool skip_call = false;
8259    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8260    std::unique_lock<std::mutex> lock(global_lock);
8261
8262    auto cb_node = getCBNode(dev_data, commandBuffer);
8263    auto src_image_state = getImageState(dev_data, srcImage);
8264    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8265    if (cb_node && src_image_state && dst_buff_state) {
8266        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT,
8267                                              "vkCmdCopyImageToBuffer(): srcImage", VALIDATION_ERROR_01249);
8268        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02537);
8269        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02538);
8270        // Update bindings between buffer/image and cmd buffer
8271        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8272        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8273        // Validate that SRC image & DST buffer have correct usage flags set
8274        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8275                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8276        skip_call |=
8277            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
8278                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8279        std::function<bool()> function = [=]() {
8280            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8281        };
8282        cb_node->validate_functions.push_back(function);
8283        function = [=]() {
8284            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8285            return false;
8286        };
8287        cb_node->validate_functions.push_back(function);
8288
8289        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8290        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER);
8291        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260);
8292        for (uint32_t i = 0; i < regionCount; ++i) {
8293            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout,
8294                                                 VALIDATION_ERROR_01251);
8295            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i,
8296                                                                                "vkCmdCopyImageToBuffer()");
8297        }
8298    } else {
8299        assert(0);
8300    }
8301    lock.unlock();
8302    if (!skip_call)
8303        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8304}
8305
8306VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8307                                           VkDeviceSize dataSize, const uint32_t *pData) {
8308    bool skip_call = false;
8309    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8310    std::unique_lock<std::mutex> lock(global_lock);
8311
8312    auto cb_node = getCBNode(dev_data, commandBuffer);
8313    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8314    if (cb_node && dst_buff_state) {
8315        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
8316        // Update bindings between buffer and cmd buffer
8317        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8318        // Validate that DST buffer has correct usage flags set
8319        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8320                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8321        std::function<bool()> function = [=]() {
8322            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8323            return false;
8324        };
8325        cb_node->validate_functions.push_back(function);
8326
8327        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8328        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
8329        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
8330    } else {
8331        assert(0);
8332    }
8333    lock.unlock();
8334    if (!skip_call) dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8335}
8336
8337VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8338                                         VkDeviceSize size, uint32_t data) {
8339    bool skip_call = false;
8340    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8341    std::unique_lock<std::mutex> lock(global_lock);
8342
8343    auto cb_node = getCBNode(dev_data, commandBuffer);
8344    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8345    if (cb_node && dst_buff_state) {
8346        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()", VALIDATION_ERROR_02529);
8347        // Update bindings between buffer and cmd buffer
8348        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8349        // Validate that DST buffer has correct usage flags set
8350        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8351                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8352        std::function<bool()> function = [=]() {
8353            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8354            return false;
8355        };
8356        cb_node->validate_functions.push_back(function);
8357
8358        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8359        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
8360        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
8361    } else {
8362        assert(0);
8363    }
8364    lock.unlock();
8365    if (!skip_call) dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8366}
8367
8368VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8369                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
8370                                               const VkClearRect *pRects) {
8371    bool skip = false;
8372    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8373    {
8374        std::lock_guard<std::mutex> lock(global_lock);
8375        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8376    }
8377    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8378}
8379
8380VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8381                                              const VkClearColorValue *pColor, uint32_t rangeCount,
8382                                              const VkImageSubresourceRange *pRanges) {
8383    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8384    std::unique_lock<std::mutex> lock(global_lock);
8385
8386    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
8387    if (!skip) {
8388        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARCOLORIMAGE);
8389        lock.unlock();
8390        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8391    }
8392}
8393
8394VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8395                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8396                                                     const VkImageSubresourceRange *pRanges) {
8397    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8398    std::unique_lock<std::mutex> lock(global_lock);
8399
8400    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
8401    if (!skip) {
8402        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARDEPTHSTENCILIMAGE);
8403        lock.unlock();
8404        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8405    }
8406}
8407
8408VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8409                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
8410                                           const VkImageResolve *pRegions) {
8411    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8412    std::unique_lock<std::mutex> lock(global_lock);
8413
8414    auto cb_node = getCBNode(dev_data, commandBuffer);
8415    auto src_image_state = getImageState(dev_data, srcImage);
8416    auto dst_image_state = getImageState(dev_data, dstImage);
8417
8418    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
8419
8420    if (!skip) {
8421        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
8422        lock.unlock();
8423        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8424                                                 pRegions);
8425    }
8426}
8427
8428bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8429    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8430    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8431    if (pCB) {
8432        pCB->eventToStageMap[event] = stageMask;
8433    }
8434    auto queue_data = dev_data->queueMap.find(queue);
8435    if (queue_data != dev_data->queueMap.end()) {
8436        queue_data->second.eventToStageMap[event] = stageMask;
8437    }
8438    return false;
8439}
8440
8441VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8442    bool skip_call = false;
8443    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8444    std::unique_lock<std::mutex> lock(global_lock);
8445    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8446    if (pCB) {
8447        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8448        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT);
8449        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
8450        skip_call |=
8451            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_00230, VALIDATION_ERROR_00231);
8452        auto event_state = getEventNode(dev_data, event);
8453        if (event_state) {
8454            addCommandBufferBinding(&event_state->cb_bindings,
8455                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8456            event_state->cb_bindings.insert(pCB);
8457        }
8458        pCB->events.push_back(event);
8459        if (!pCB->waitedEvents.count(event)) {
8460            pCB->writeEventsBeforeWait.push_back(event);
8461        }
8462        std::function<bool(VkQueue)> eventUpdate =
8463            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8464        pCB->eventUpdates.push_back(eventUpdate);
8465    }
8466    lock.unlock();
8467    if (!skip_call) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
8468}
8469
8470VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8471    bool skip_call = false;
8472    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8473    std::unique_lock<std::mutex> lock(global_lock);
8474    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8475    if (pCB) {
8476        skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8477        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT);
8478        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
8479        skip_call |=
8480            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_00240, VALIDATION_ERROR_00241);
8481        auto event_state = getEventNode(dev_data, event);
8482        if (event_state) {
8483            addCommandBufferBinding(&event_state->cb_bindings,
8484                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8485            event_state->cb_bindings.insert(pCB);
8486        }
8487        pCB->events.push_back(event);
8488        if (!pCB->waitedEvents.count(event)) {
8489            pCB->writeEventsBeforeWait.push_back(event);
8490        }
8491        // TODO: Add check for VALIDATION_ERROR_00226
8492        std::function<bool(VkQueue)> eventUpdate =
8493            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8494        pCB->eventUpdates.push_back(eventUpdate);
8495    }
8496    lock.unlock();
8497    if (!skip_call) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
8498}
8499
8500// Print readable FlagBits in FlagMask
8501static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8502    std::string result;
8503    std::string separator;
8504
8505    if (accessMask == 0) {
8506        result = "[None]";
8507    } else {
8508        result = "[";
8509        for (uint32_t i = 0; i < 32; i++) {
8510            if (accessMask & (1u << i)) {
8511                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8512                separator = " | ";
8513            }
8514        }
8515        result = result + "]";
8516    }
8517    return result;
8518}
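
// Example (illustrative): a mask of VK_ACCESS_SHADER_READ_BIT |
// VK_ACCESS_TRANSFER_READ_BIT decodes to
// "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]", and a mask of 0
// decodes to "[None]".
static inline std::string ExampleAccessFlagString() {
    return string_VkAccessFlags(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT);
}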
8519
8520// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8521// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8522// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8523static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8524                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8525                             const char *type) {
8526    bool skip_call = false;
8527
8528    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8529        if (accessMask & ~(required_bit | optional_bits)) {
8530            // TODO: Verify against Valid Use
8531            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8532                                 DRAWSTATE_INVALID_BARRIER, "DS",
8533                                 "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", type, accessMask,
8534                                 string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8535        }
8536    } else {
8537        if (!required_bit) {
8538            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8539                                 DRAWSTATE_INVALID_BARRIER, "DS",
8540                                 "%s AccessMask %d %s must contain at least one of access bits %d "
8541                                 "%s when layout is %s, unless the app has previously added a "
8542                                 "barrier for this transition.",
8543                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8544                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8545        } else {
8546            std::string opt_bits;
8547            if (optional_bits != 0) {
8548                std::stringstream ss;
8549                ss << optional_bits;
8550                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8551            }
8552            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8553                                 DRAWSTATE_INVALID_BARRIER, "DS",
8554                                 "%s AccessMask %d %s must have required access bit %d %s %s when "
8555                                 "layout is %s, unless the app has previously added a barrier for "
8556                                 "this transition.",
8557                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8558                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8559        }
8560    }
8561    return skip_call;
8562}
8563
8564static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8565                                        const VkImageLayout &layout, const char *type) {
8566    bool skip_call = false;
8567    switch (layout) {
8568        case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8569            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8570                                          VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
8571            break;
8572        }
8573        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8574            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8575                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
8576            break;
8577        }
8578        case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8579            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8580            break;
8581        }
8582        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8583            skip_call |= ValidateMaskBits(
8584                my_data, cmdBuffer, accessMask, layout, 0,
8585                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
8586                type);
8587            break;
8588        }
8589        case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8590            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8591                                          VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8592            break;
8593        }
8594        case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8595            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8596            break;
8597        }
8598        case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
8599            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
8600            break;
8601        }
8602        case VK_IMAGE_LAYOUT_UNDEFINED: {
8603            if (accessMask != 0) {
8604                // TODO: Verify against Valid Use section spec
8605                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8606                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8607                                     "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", type, accessMask,
8608                                     string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8609            }
8610            break;
8611        }
8612        case VK_IMAGE_LAYOUT_GENERAL:
8613        default: { break; }
8614    }
8615    return skip_call;
8616}
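
// Example (illustrative sketch): an image barrier that satisfies the
// TRANSFER_DST_OPTIMAL case above carries VK_ACCESS_TRANSFER_WRITE_BIT in its
// dstAccessMask. The remaining fields shown are assumptions for a simple
// whole-image color transition.
static inline VkImageMemoryBarrier ExampleTransferDstBarrier(VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;  // a zero srcAccessMask is acceptable for UNDEFINED
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // the required bit for the new layout
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    return barrier;
}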
8617
8618static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8619                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8620                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8621                             const VkImageMemoryBarrier *pImageMemBarriers) {
8622    bool skip = false;
8623    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8624    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8625    if (pCB->activeRenderPass && memBarrierCount) {
8626        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8627            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8628                            DRAWSTATE_INVALID_BARRIER, "DS",
8629                            "%s: Barriers cannot be set during subpass %d "
8630                            "with no self dependency specified.",
8631                            funcName, pCB->activeSubpass);
8632        }
8633    }
8634    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8635        auto mem_barrier = &pImageMemBarriers[i];
8636        auto image_data = getImageState(dev_data, mem_barrier->image);
8637        if (image_data) {
8638            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8639            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8640            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8641                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8642                // be VK_QUEUE_FAMILY_IGNORED
8643                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8644                    skip |=
8645                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8646                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image Barrier for image 0x%" PRIx64
8647                                                                     " was created with sharingMode of "
8648                                                                     "VK_SHARING_MODE_CONCURRENT. Src and dst "
8649                                                                     "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8650                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8651                }
8652            } else {
8653                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8654                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8655                // or both be a valid queue family
8656                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8657                    (src_q_f_index != dst_q_f_index)) {
8658                    skip |=
8659                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8660                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64
8661                                                                     " was created with sharingMode "
8662                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8663                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8664                                                                     "must be.",
8665                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8666                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8667                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8668                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8669                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8670                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8671                                    "%s: Image 0x%" PRIx64
8672                                    " was created with sharingMode "
8673                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8674                                    " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8675                                    " queueFamilies created for this device.",
8676                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
8677                                    dev_data->phys_dev_properties.queue_family_properties.size());
8678                }
8679            }
8680        }
8681
8682        if (mem_barrier) {
8683            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
8684                skip |=
8685                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8686                skip |=
8687                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8688            }
8689            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8690                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8691                                DRAWSTATE_INVALID_BARRIER, "DS",
8692                                "%s: Image Layout cannot be transitioned to UNDEFINED or "
8693                                "PREINITIALIZED.",
8694                                funcName);
8695            }
8696            VkFormat format = VK_FORMAT_UNDEFINED;
8697            uint32_t arrayLayers = 0, mipLevels = 0;
8698            bool imageFound = false;
8699            if (image_data) {
8700                format = image_data->createInfo.format;
8701                arrayLayers = image_data->createInfo.arrayLayers;
8702                mipLevels = image_data->createInfo.mipLevels;
8703                imageFound = true;
8704            } else if (dev_data->device_extensions.wsi_enabled) {
8705                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8706                if (imageswap_data) {
8707                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8708                    if (swapchain_data) {
8709                        format = swapchain_data->createInfo.imageFormat;
8710                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8711                        mipLevels = 1;
8712                        imageFound = true;
8713                    }
8714                }
8715            }
8716            if (imageFound) {
8717                skip |= ValidateImageSubrangeLevelLayerCounts(dev_data, mem_barrier->subresourceRange, funcName);
8718                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8719                skip |= ValidateImageAspectMask(dev_data, image_data->image, format, aspect_mask, funcName);
8720                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8721                                     ? 1
8722                                     : mem_barrier->subresourceRange.layerCount;
8723                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8724                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8725                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8726                                    "%s: The sum of the subresource's "
8727                                    "baseArrayLayer (%d) and layerCount (%d) must be less "
8728                                    "than or equal to the total number of layers (%d).",
8729                                    funcName, mem_barrier->subresourceRange.baseArrayLayer,
8730                                    mem_barrier->subresourceRange.layerCount, arrayLayers);
8731                }
8732                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8733                                     ? 1
8734                                     : mem_barrier->subresourceRange.levelCount;
8735                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8736                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8737                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8738                                    "%s: The sum of the subresource's baseMipLevel "
8739                                    "(%d) and levelCount (%d) must be less than or equal to "
8740                                    "the total number of levels (%d).",
8741                                    funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
8742                                    mipLevels);
8743                }
8744            }
8745        }
8746    }
8747    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8748        auto mem_barrier = &pBufferMemBarriers[i];
8749        if (pCB->activeRenderPass) {
8750            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8751                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8752        }
8753        if (!mem_barrier) continue;
8754
8755        // Validate buffer barrier queue family indices
8756        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8757             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8758            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8759             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8760            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8761                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8762                            "%s: Buffer Barrier 0x%" PRIx64
8763                            " has QueueFamilyIndex greater "
8764                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8765                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8766                            dev_data->phys_dev_properties.queue_family_properties.size());
8767        }
8768
8769        auto buffer_state = getBufferState(dev_data, mem_barrier->buffer);
8770        if (buffer_state) {
8771            auto buffer_size = buffer_state->requirements.size;
8772            if (mem_barrier->offset >= buffer_size) {
8773                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8774                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
8775                                                                 " which is not less than total size 0x%" PRIx64 ".",
8776                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8777                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8778                                reinterpret_cast<const uint64_t &>(buffer_size));
8779            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8780                skip |= log_msg(
8781                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8782                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8783                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
8784                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8785                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8786                    reinterpret_cast<const uint64_t &>(buffer_size));
8787            }
8788        }
8789    }
8790    return skip;
8791}
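
// Example of the buffer barrier range rule above (illustrative sketch): for a
// buffer whose memory requirements report a size of 256 bytes, offset 128 with
// size 192 fails (128 + 192 > 256), while offset 128 with VK_WHOLE_SIZE passes.
static inline bool ExampleBufferBarrierRangeOk(VkDeviceSize offset, VkDeviceSize size) {
    const VkDeviceSize buffer_size = 256;  // assumed requirements.size
    if (offset >= buffer_size) return false;
    if (size != VK_WHOLE_SIZE && (offset + size > buffer_size)) return false;
    return true;
}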
8792
8793bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
8794                            VkPipelineStageFlags sourceStageMask) {
8795    bool skip_call = false;
8796    VkPipelineStageFlags stageMask = 0;
8797    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8798    for (uint32_t i = 0; i < eventCount; ++i) {
8799        auto event = pCB->events[firstEventIndex + i];
8800        auto queue_data = dev_data->queueMap.find(queue);
8801        if (queue_data == dev_data->queueMap.end()) return false;
8802        auto event_data = queue_data->second.eventToStageMap.find(event);
8803        if (event_data != queue_data->second.eventToStageMap.end()) {
8804            stageMask |= event_data->second;
8805        } else {
8806            auto global_event_data = getEventNode(dev_data, event);
8807            if (!global_event_data) {
8808                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8809                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8810                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8811                                     reinterpret_cast<const uint64_t &>(event));
8812            } else {
8813                stageMask |= global_event_data->stageMask;
8814            }
8815        }
8816    }
8817    // TODO: Need to validate that host_bit is only set if set event is called
8818    // but set event can be called at any time.
8819    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8820        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8821                             VALIDATION_ERROR_00254, "DS",
8822                             "Submitting cmdbuffer with call to vkCmdWaitEvents "
8823                             "using srcStageMask 0x%X which must be the bitwise "
8824                             "OR of the stageMask parameters used in calls to "
8825                             "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
8826                             "used with vkSetEvent but instead is 0x%X. %s",
8827                             sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
8828    }
8829    return skip_call;
8830}
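
// Example of the stage mask rule above (illustrative sketch): if the recorded
// vkCmdSetEvent calls used VK_PIPELINE_STAGE_VERTEX_SHADER_BIT and
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, a valid vkCmdWaitEvents srcStageMask
// is exactly their bitwise OR, optionally with VK_PIPELINE_STAGE_HOST_BIT for
// events set from the host.
static inline bool ExampleWaitEventsMaskOk(VkPipelineStageFlags sourceStageMask) {
    const VkPipelineStageFlags set_mask =
        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;  // assumed history
    return (sourceStageMask == set_mask) || (sourceStageMask == (set_mask | VK_PIPELINE_STAGE_HOST_BIT));
}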
8831
8832// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
8833static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
8834    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8835    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8836    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8837    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8838    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8839    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8840    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8841    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8842    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8843    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8844    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8845    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
8846    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
8847    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
8848
8849static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
8850                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
8851                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
8852                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
8853                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
8854                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
8855                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
8856                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
8857                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
8858                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
8859                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
8860                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
8861                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
8862                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
8863
8864bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
8865                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
8866                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
8867    bool skip = false;
    // Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
8869    for (const auto &item : stage_flag_bit_array) {
8870        if (stage_mask & item) {
8871            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
8872                skip |=
8873                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8874                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
8875                            "%s(): %s flag %s is not compatible with the queue family properties of this "
8876                            "command buffer. %s",
8877                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
8878                            validation_error_map[error_code]);
8879            }
8880        }
8881    }
8882    return skip;
8883}
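
// Illustrative example of the lookup above (hypothetical values): for a command buffer
// allocated from a pool whose queue family reports only VK_QUEUE_COMPUTE_BIT, a
// graphics-only stage bit fails the compatibility test:
//
//     queue_flags = VK_QUEUE_COMPUTE_BIT
//     stage_mask  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT  // table requires VK_QUEUE_GRAPHICS_BIT
//     (supported_pipeline_stages_table[item] & queue_flags) == 0  -> error logged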
8884
8885bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
8886                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
8887                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
8888    bool skip = false;
8889    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
8890    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
8891    auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device);
8892
8893    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
8894    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
8895    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
8896
8897    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
8898        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
8899
8900        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8901            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
8902                                                     function, "srcStageMask", error_code);
8903        }
8904        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8905            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
8906                                                     function, "dstStageMask", error_code);
8907        }
8908    }
8909    return skip;
8910}
8911
8912VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
8913                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
8914                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8915                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8916                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8917    bool skip = false;
8918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8919    std::unique_lock<std::mutex> lock(global_lock);
8920    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
8921    if (cb_state) {
8922        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
8923                                                           VALIDATION_ERROR_02510);
8924        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02067,
8925                                             VALIDATION_ERROR_02069);
8926        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02068,
8927                                             VALIDATION_ERROR_02070);
8928        auto first_event_index = cb_state->events.size();
8929        for (uint32_t i = 0; i < eventCount; ++i) {
8930            auto event_state = getEventNode(dev_data, pEvents[i]);
8931            if (event_state) {
                // addCommandBufferBinding() records the event<->command-buffer link in both directions
                addCommandBufferBinding(&event_state->cb_bindings,
                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
                                        cb_state);
8936            }
8937            cb_state->waitedEvents.insert(pEvents[i]);
8938            cb_state->events.push_back(pEvents[i]);
8939        }
8940        std::function<bool(VkQueue)> event_update =
8941            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
8942        cb_state->eventUpdates.push_back(event_update);
8943        if (cb_state->state == CB_RECORDING) {
8944            skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8945            UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS);
8946        } else {
8947            skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8948        }
8949        skip |= TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8950        skip |= ValidateBarriers("vkCmdWaitEvents()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8951                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8952    }
8953    lock.unlock();
8954    if (!skip)
8955        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8956                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8957                                               imageMemoryBarrierCount, pImageMemoryBarriers);
8958}
8959
8960VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8961                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8962                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8963                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8964                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8965    bool skip = false;
8966    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8967    std::unique_lock<std::mutex> lock(global_lock);
8968    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
8969    if (cb_state) {
8970        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
8971                                                           VALIDATION_ERROR_02513);
8972        skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8973        skip |= ValidateStageMaskGsTsEnables(dev_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00265,
8974                                             VALIDATION_ERROR_00267);
8975        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00266,
8976                                             VALIDATION_ERROR_00268);
8977        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER);
8978        skip |= TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8979        skip |= ValidateBarriers("vkCmdPipelineBarrier()", commandBuffer, memoryBarrierCount, pMemoryBarriers,
8980                                 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8981    }
8982    lock.unlock();
8983    if (!skip)
8984        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
8985                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8986                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
8987}
8988
8989bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8990    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8991    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8992    if (pCB) {
8993        pCB->queryToStateMap[object] = value;
8994    }
8995    auto queue_data = dev_data->queueMap.find(queue);
8996    if (queue_data != dev_data->queueMap.end()) {
8997        queue_data->second.queryToStateMap[object] = value;
8998    }
8999    return false;
9000}
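
// setQueryState() is not invoked at record time; it is captured via std::bind and replayed
// when the command buffer is submitted, so query state is updated against the queue that
// actually executes the commands.  Sketch of the deferred-update pattern used below:
//
//     std::function<bool(VkQueue)> queryUpdate =
//         std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
//     pCB->queryUpdates.push_back(queryUpdate);  // later called as queryUpdate(queue)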
9001
9002VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9003    bool skip_call = false;
9004    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9005    std::unique_lock<std::mutex> lock(global_lock);
9006    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9007    if (pCB) {
9008        QueryObject query = {queryPool, slot};
9009        pCB->activeQueries.insert(query);
        pCB->startedQueries.insert(query);  // set insertion is a no-op if already present
9013        skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9014        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY);
9015        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9016                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9017    }
9018    lock.unlock();
9019    if (!skip_call) dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9020}
9021
9022VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9023    bool skip_call = false;
9024    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9025    std::unique_lock<std::mutex> lock(global_lock);
9026    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9027    if (pCB) {
9028        QueryObject query = {queryPool, slot};
9029        if (!pCB->activeQueries.count(query)) {
9030            skip_call |=
9031                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9032                        VALIDATION_ERROR_01041, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
9033                        (uint64_t)(queryPool), slot, validation_error_map[VALIDATION_ERROR_01041]);
9034        } else {
9035            pCB->activeQueries.erase(query);
9036        }
9037        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9038        pCB->queryUpdates.push_back(queryUpdate);
9039        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9041            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY);
9042        } else {
9043            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9044        }
9045        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9046                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9047    }
9048    lock.unlock();
9049    if (!skip_call) dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9050}
9051
9052VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
9053                                             uint32_t queryCount) {
9054    bool skip_call = false;
9055    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9056    std::unique_lock<std::mutex> lock(global_lock);
9057    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9058    if (pCB) {
9059        for (uint32_t i = 0; i < queryCount; i++) {
9060            QueryObject query = {queryPool, firstQuery + i};
9061            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9062            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9063            pCB->queryUpdates.push_back(queryUpdate);
9064        }
9065        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9067            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
9068        } else {
9069            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9070        }
9071        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
9072        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9073                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9074    }
9075    lock.unlock();
9076    if (!skip_call) dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9077}
9078
9079bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9080    bool skip_call = false;
9081    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9082    auto queue_data = dev_data->queueMap.find(queue);
9083    if (queue_data == dev_data->queueMap.end()) return false;
9084    for (uint32_t i = 0; i < queryCount; i++) {
9085        QueryObject query = {queryPool, firstQuery + i};
9086        auto query_data = queue_data->second.queryToStateMap.find(query);
9087        bool fail = false;
9088        if (query_data != queue_data->second.queryToStateMap.end()) {
9089            if (!query_data->second) {
9090                fail = true;
9091            }
9092        } else {
9093            auto global_query_data = dev_data->queryToStateMap.find(query);
9094            if (global_query_data != dev_data->queryToStateMap.end()) {
9095                if (!global_query_data->second) {
9096                    fail = true;
9097                }
9098            } else {
9099                fail = true;
9100            }
9101        }
9102        if (fail) {
9103            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9104                                 DRAWSTATE_INVALID_QUERY, "DS",
9105                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9106                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9107        }
9108    }
9109    return skip_call;
9110}
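
// Query availability is resolved in two steps above: the per-queue queryToStateMap
// (updated as this queue's submissions retire) is consulted first, then the device-wide
// dev_data->queryToStateMap as a fallback; a query found in neither map has never been
// written and fails the check.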
9111
9112VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
9113                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
9114                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
9115    bool skip_call = false;
9116    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9117    std::unique_lock<std::mutex> lock(global_lock);
9118
9119    auto cb_node = getCBNode(dev_data, commandBuffer);
9120    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
9121    if (cb_node && dst_buff_state) {
9122        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
9123        // Update bindings between buffer and cmd buffer
9124        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
9125        // Validate that DST buffer has correct usage flags set
9126        skip_call |=
9127            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
9128                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9129        std::function<bool()> function = [=]() {
9130            SetBufferMemoryValid(dev_data, dst_buff_state, true);
9131            return false;
9132        };
9133        cb_node->validate_functions.push_back(function);
9134        std::function<bool(VkQueue)> queryUpdate =
9135            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9136        cb_node->queryUpdates.push_back(queryUpdate);
9137        if (cb_node->state == CB_RECORDING) {
9138            skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9139            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS);
9140        } else {
9141            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9142        }
9143        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
9144        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9145                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9146    } else {
9147        assert(0);
9148    }
9149    lock.unlock();
9150    if (!skip_call)
9151        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9152                                                         stride, flags);
9153}
9154
9155VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
9156                                            uint32_t offset, uint32_t size, const void *pValues) {
9157    bool skip_call = false;
9158    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9159    std::unique_lock<std::mutex> lock(global_lock);
9160    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9161    if (pCB) {
9162        if (pCB->state == CB_RECORDING) {
9163            skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9164            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS);
9165        } else {
9166            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9167        }
9168    }
9169    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9170    if (0 == stageFlags) {
9171        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9172                             VALIDATION_ERROR_00996, "DS", "vkCmdPushConstants() call has no stageFlags set. %s",
9173                             validation_error_map[VALIDATION_ERROR_00996]);
9174    }
9175
9176    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9177    auto pipeline_layout = getPipelineLayout(dev_data, layout);
9178    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9179    // contained in the pipeline ranges.
9180    // Build a {start, end} span list for ranges with matching stage flags.
9181    const auto &ranges = pipeline_layout->push_constant_ranges;
9182    struct span {
9183        uint32_t start;
9184        uint32_t end;
9185    };
9186    std::vector<span> spans;
9187    spans.reserve(ranges.size());
9188    for (const auto &iter : ranges) {
9189        if (iter.stageFlags == stageFlags) {
9190            spans.push_back({iter.offset, iter.offset + iter.size});
9191        }
9192    }
    if (spans.empty()) {
9194        // There were no ranges that matched the stageFlags.
9195        skip_call |=
9196            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9197                    VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32
9198                                                  " do not match "
9199                                                  "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ". %s",
9200                    (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
9201    } else {
9202        // Sort span list by start value.
9203        struct comparer {
9204            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9205        } my_comparer;
9206        std::sort(spans.begin(), spans.end(), my_comparer);
9207
9208        // Examine two spans at a time.
9209        std::vector<span>::iterator current = spans.begin();
9210        std::vector<span>::iterator next = current + 1;
9211        while (next != spans.end()) {
9212            if (current->end < next->start) {
9213                // There is a gap; cannot coalesce. Move to the next two spans.
9214                ++current;
9215                ++next;
9216            } else {
9217                // Coalesce the two spans.  The start of the next span
9218                // is within the current span, so pick the larger of
9219                // the end values to extend the current span.
9220                // Then delete the next span and set next to the span after it.
9221                current->end = max(current->end, next->end);
9222                next = spans.erase(next);
9223            }
9224        }
9225
9226        // Now we can check if the incoming range is within any of the spans.
9227        bool contained_in_a_range = false;
9228        for (uint32_t i = 0; i < spans.size(); ++i) {
9229            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9230                contained_in_a_range = true;
9231                break;
9232            }
9233        }
9234        if (!contained_in_a_range) {
9235            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9236                                 VALIDATION_ERROR_00988, "DS",
9237                                 "vkCmdPushConstants() Push constant range [%d, %d) "
9238                                 "with stageFlags = 0x%" PRIx32
9239                                 " "
9240                                 "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ". %s",
9241                                 offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout,
9242                                 validation_error_map[VALIDATION_ERROR_00988]);
9243        }
9244    }
9245    lock.unlock();
9246    if (!skip_call) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9247}
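
// Worked example of the span coalescing above (hypothetical values): given pipeline layout
// ranges {offset=0, size=16}, {offset=8, size=24}, and {offset=40, size=8}, all with the
// same stageFlags, the spans [0,16), [8,32), [40,48) sort in that order and the first two
// merge into [0,32).  A vkCmdPushConstants() update with offset=4, size=20 lies within
// [0,32) and passes; offset=28, size=16 crosses the uncovered gap [32,40) and is flagged.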
9248
9249VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
9250                                             VkQueryPool queryPool, uint32_t slot) {
9251    bool skip_call = false;
9252    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9253    std::unique_lock<std::mutex> lock(global_lock);
9254    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9255    if (pCB) {
9256        QueryObject query = {queryPool, slot};
9257        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9258        pCB->queryUpdates.push_back(queryUpdate);
9259        if (pCB->state == CB_RECORDING) {
9260            skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9261            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
9262        } else {
9263            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9264        }
9265    }
9266    lock.unlock();
9267    if (!skip_call) dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9268}
9269
9270static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9271                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
9272                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
9273    bool skip_call = false;
9274
9275    for (uint32_t attach = 0; attach < count; attach++) {
9276        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9277            // Attachment counts are verified elsewhere, but prevent an invalid access
9278            if (attachments[attach].attachment < fbci->attachmentCount) {
9279                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9280                auto view_state = getImageViewState(dev_data, *image_view);
9281                if (view_state) {
                    // getImageState() can return null, so validate the pointer before dereferencing
                    auto image_state = getImageState(dev_data, view_state->create_info.image);
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
9284                        if ((ici->usage & usage_flag) == 0) {
9285                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9286                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, error_code, "DS",
9287                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9288                                                 "IMAGE_USAGE flags (%s). %s",
9289                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
9290                                                 validation_error_map[error_code]);
9291                        }
9292                    }
9293                }
9294            }
9295        }
9296    }
9297    return skip_call;
9298}
9299
9300// Validate VkFramebufferCreateInfo which includes:
9301// 1. attachmentCount equals renderPass attachmentCount
9302// 2. corresponding framebuffer and renderpass attachments have matching formats
9303// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9304// 4. fb attachments only have a single mip level
9305// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
9307// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9308// 8. fb dimensions are within physical device limits
9309static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9310    bool skip_call = false;
9311
9312    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
9313    if (rp_state) {
9314        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
9315        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9316            skip_call |= log_msg(
9317                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9318                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
9319                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9320                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
9321                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
9322                validation_error_map[VALIDATION_ERROR_00404]);
9323        } else {
9324            // attachmentCounts match, so make sure corresponding attachment details line up
9325            const VkImageView *image_views = pCreateInfo->pAttachments;
9326            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                if (!view_state) {
                    continue;  // skip unknown/destroyed image views rather than dereferencing null
                }
                auto &ivci = view_state->create_info;
9329                if (ivci.format != rpci->pAttachments[i].format) {
9330                    skip_call |= log_msg(
9331                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9332                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
9333                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9334                        "the format of "
9335                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
9336                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9337                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
9338                }
9339                const VkImageCreateInfo *ici = &getImageState(dev_data, ivci.image)->createInfo;
9340                if (ici->samples != rpci->pAttachments[i].samples) {
9341                    skip_call |= log_msg(
9342                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9343                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
9344                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9345                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
9346                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9347                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
9348                }
9349                // Verify that view only has a single mip level
9350                if (ivci.subresourceRange.levelCount != 1) {
9351                    skip_call |=
9352                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9353                                VALIDATION_ERROR_00411, "DS",
9354                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
9356                                i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
9357                }
9358                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9359                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9360                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9361                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9362                    (mip_height < pCreateInfo->height)) {
9363                    skip_call |=
9364                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9365                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9366                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9367                                "than the corresponding "
9368                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9369                                "dimensions for "
9370                                "attachment #%u, framebuffer:\n"
9371                                "width: %u, %u\n"
9372                                "height: %u, %u\n"
9373                                "layerCount: %u, %u\n",
9374                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9375                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9376                }
9377                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9378                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9379                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9380                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9381                    skip_call |= log_msg(
9382                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9383                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
9385                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9386                        "r swizzle = %s\n"
9387                        "g swizzle = %s\n"
9388                        "b swizzle = %s\n"
9389                        "a swizzle = %s\n"
9390                        "%s",
9391                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9392                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
9393                        validation_error_map[VALIDATION_ERROR_00412]);
9394                }
9395            }
9396        }
9397        // Verify correct attachment usage flags
9398        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9399            // Verify input attachments:
9400            skip_call |=
9401                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
9402                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
9403            // Verify color attachments:
9404            skip_call |=
9405                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
9406                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
9407            // Verify depth/stencil attachments:
9408            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9409                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9410                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
9411            }
9412        }
9413    }
9414    // Verify FB dimensions are within physical device limits
9415    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
9416        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9417                             VALIDATION_ERROR_00413, "DS",
9418                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
9419                             "Requested width: %u, device max: %u\n"
9420                             "%s",
9421                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9422                             validation_error_map[VALIDATION_ERROR_00413]);
9423    }
9424    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
9425        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9426                             VALIDATION_ERROR_00414, "DS",
9427                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
9428                             "Requested height: %u, device max: %u\n"
9429                             "%s",
9430                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9431                             validation_error_map[VALIDATION_ERROR_00414]);
9432    }
9433    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
9434        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9435                             VALIDATION_ERROR_00415, "DS",
9436                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
9437                             "Requested layers: %u, device max: %u\n"
9438                             "%s",
9439                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
9440                             validation_error_map[VALIDATION_ERROR_00415]);
9441    }
9442    return skip_call;
9443}
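
// Illustrative arithmetic for check #5 above (hypothetical values): for an attachment view
// with baseMipLevel = 2 over a 1024x1024 image, the usable extent per dimension is
// max(1u, 1024 >> 2) = 256, so a VkFramebufferCreateInfo requesting a width or height
// greater than 256 (or layers greater than the view's layerCount) is reported.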
9444
9445// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
9446//  Return true if an error is encountered and callback returns true to skip call down chain
9447//   false indicates that call down chain should proceed
9448static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9449    // TODO : Verify that renderPass FB is created with is compatible with FB
9450    bool skip_call = false;
9451    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9452    return skip_call;
9453}
9454
9455// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9456static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9457    // Shadow create info and store in map
9458    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
9459        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
9460
9461    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9462        VkImageView view = pCreateInfo->pAttachments[i];
9463        auto view_state = getImageViewState(dev_data, view);
9464        if (!view_state) {
9465            continue;
9466        }
9467        MT_FB_ATTACHMENT_INFO fb_info;
9468        fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
9469        fb_info.view_state = view_state;
9470        fb_info.image = view_state->create_info.image;
9471        fb_state->attachments.push_back(fb_info);
9472    }
9473    dev_data->frameBufferMap[fb] = std::move(fb_state);
9474}
9475
9476VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9477                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
9478    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9479    std::unique_lock<std::mutex> lock(global_lock);
9480    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9481    lock.unlock();
9482
9483    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
9484
9485    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9486
9487    if (VK_SUCCESS == result) {
9488        lock.lock();
9489        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9490        lock.unlock();
9491    }
9492    return result;
9493}
9494
9495static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9496                           std::unordered_set<uint32_t> &processed_nodes) {
9497    // If we have already checked this node we have not found a dependency path so return false.
9498    if (processed_nodes.count(index)) return false;
9499    processed_nodes.insert(index);
9500    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
9502    if (std::find(node.prev.begin(), node.prev.end(), static_cast<uint32_t>(dependent)) == node.prev.end()) {
9503        for (auto elem : node.prev) {
9504            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
9505        }
9506    } else {
9507        return true;
9508    }
9509    return false;
9510}
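
// Illustrative example (hypothetical subpass graph): with edges 0 -> 1 -> 2 recorded in
// subpass_to_node (node 2's prev = {1}, node 1's prev = {0}), FindDependency(2, 0, ...)
// does not find 0 in node 2's prev list, recurses into node 1, finds 0 there, and returns
// true -- i.e. a transitive dependency path exists.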
9511
9512static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9513                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9514    bool result = true;
9515    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9516    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9517        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
9518        const DAGNode &node = subpass_to_node[subpass];
9519        // Check for a specified dependency between the two nodes. If one exists we are done.
9520        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9521        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9522        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If not, throw an error.
9524            std::unordered_set<uint32_t> processed_nodes;
9525            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9526                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9527                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9528                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9529                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9530                                     dependent_subpasses[k]);
9531                result = false;
9532            }
9533        }
9534    }
9535    return result;
9536}
9537
9538static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9539                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9540    const DAGNode &node = subpass_to_node[index];
9541    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9542    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9543    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9544        if (attachment == subpass.pColorAttachments[j].attachment) return true;
9545    }
9546    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9547        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
9548    }
9549    bool result = false;
9550    // Loop through previous nodes and see if any of them write to the attachment.
9551    for (auto elem : node.prev) {
9552        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9553    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
9555    if (result && depth > 0) {
9556        bool has_preserved = false;
9557        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9558            if (subpass.pPreserveAttachments[j] == attachment) {
9559                has_preserved = true;
9560                break;
9561            }
9562        }
9563        if (!has_preserved) {
9564            skip_call |=
9565                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9566                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9567                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9568        }
9569    }
9570    return result;
9571}
9572
template <class T>
bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two half-open ranges [offset, offset + size) overlap iff each one begins before the other
    // ends; this form also catches the case where one range fully contains the other.
    return ((offset1 + size1) > offset2) && ((offset2 + size2) > offset1);
}
9578
9579bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9580    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9581            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9582}
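
// Worked example (hypothetical values): mip ranges [base=0, count=4) and [base=2, count=4)
// overlap because 0 + 4 > 2 and 2 + 4 > 0, while [base=0, count=2) and [base=2, count=2)
// do not, since 0 + 2 > 2 is false.  isRegionOverlapping() requires both the mip range and
// the array-layer range to overlap before two views are treated as aliasing.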
9583
9584static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
9585                                 RENDER_PASS_STATE const *renderPass) {
9586    bool skip_call = false;
9587    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
9588    auto const pCreateInfo = renderPass->createInfo.ptr();
9589    auto const &subpass_to_node = renderPass->subpassToNode;
9590    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9591    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9592    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9593    // Find overlapping attachments
9594    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9595        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9596            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9597            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9598            if (viewi == viewj) {
9599                overlapping_attachments[i].push_back(j);
9600                overlapping_attachments[j].push_back(i);
9601                continue;
9602            }
9603            auto view_state_i = getImageViewState(dev_data, viewi);
9604            auto view_state_j = getImageViewState(dev_data, viewj);
9605            if (!view_state_i || !view_state_j) {
9606                continue;
9607            }
9608            auto view_ci_i = view_state_i->create_info;
9609            auto view_ci_j = view_state_j->create_info;
9610            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9611                overlapping_attachments[i].push_back(j);
9612                overlapping_attachments[j].push_back(i);
9613                continue;
9614            }
9615            auto image_data_i = getImageState(dev_data, view_ci_i.image);
9616            auto image_data_j = getImageState(dev_data, view_ci_j.image);
9617            if (!image_data_i || !image_data_j) {
9618                continue;
9619            }
9620            if (image_data_i->binding.mem == image_data_j->binding.mem &&
9621                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
9622                                   image_data_j->binding.size)) {
9623                overlapping_attachments[i].push_back(j);
9624                overlapping_attachments[j].push_back(i);
9625            }
9626        }
9627    }
9628    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9629        uint32_t attachment = i;
9630        for (auto other_attachment : overlapping_attachments[i]) {
9631            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9632                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9633                                     __LINE__, VALIDATION_ERROR_00324, "DS",
9634                                     "Attachment %d aliases attachment %d but doesn't "
9635                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
9636                                     attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
9637            }
9638            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9639                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9640                                     __LINE__, VALIDATION_ERROR_00324, "DS",
9641                                     "Attachment %d aliases attachment %d but doesn't "
9642                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
9643                                     other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
9644            }
9645        }
9646    }
    // For each attachment, find the subpasses that use it.
9648    unordered_set<uint32_t> attachmentIndices;
9649    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9650        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9651        attachmentIndices.clear();
9652        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9653            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9654            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9655            input_attachment_to_subpass[attachment].push_back(i);
9656            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9657                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9658            }
9659        }
9660        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9661            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9662            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9663            output_attachment_to_subpass[attachment].push_back(i);
9664            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9665                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9666            }
9667            attachmentIndices.insert(attachment);
9668        }
9669        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9670            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9671            output_attachment_to_subpass[attachment].push_back(i);
9672            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9673                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9674            }
9675
9676            if (attachmentIndices.count(attachment)) {
9677                skip_call |=
9678                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9679                            DRAWSTATE_INVALID_RENDERPASS, "DS",
9680                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
9681            }
9682        }
9683    }
9684    // If there is a dependency needed make sure one exists
9685    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9686        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9687        // If the attachment is an input then all subpasses that output must have a dependency relationship
9688        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9689            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9690            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9691            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9692        }
9693        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9694        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9695            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9696            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9697            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9698            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9699        }
9700        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9701            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9702            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9703            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9704        }
9705    }
9706    // Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved
9707    // for all passes after it was written.
9708    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9709        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9710        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9711            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9712        }
9713    }
9714    return skip_call;
9715}
9716// ValidateLayoutVsAttachmentDescription validates state associated with the VkAttachmentDescription structs that are
9717// used by the subpasses of a renderpass. The initial check makes sure that READ_ONLY layout attachments don't have
9718// CLEAR as their loadOp.
9719static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9720                                                  const uint32_t attachment,
9721                                                  const VkAttachmentDescription &attachment_description) {
9722    bool skip_call = false;
9723    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9724    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9725        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9726            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9727            skip_call |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9728                                 0, __LINE__, VALIDATION_ERROR_02351, "DS",
9729                                 "Cannot clear attachment %d with invalid first layout %s. %s", attachment,
9730                                 string_VkImageLayout(first_layout), validation_error_map[VALIDATION_ERROR_02351]);
9731        }
9732    }
9733    return skip_call;
9734}
9735
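// Validate the image layouts requested by each attachment reference in every subpass: errors for layouts that are
// illegal for that attachment type, performance warnings where GENERAL is used in place of a more specific layout,
// and a ValidateLayoutVsAttachmentDescription() check against the first use of each attachment.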
9736static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9737    bool skip = false;
9738
9739    // Track when we're observing the first use of an attachment
9740    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9741    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9742        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9743        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9744            auto attach_index = subpass.pColorAttachments[j].attachment;
9745            if (attach_index == VK_ATTACHMENT_UNUSED) continue;
9746
9747            switch (subpass.pColorAttachments[j].layout) {
9748                case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9749                    // This is ideal.
9750                    break;
9751
9752                case VK_IMAGE_LAYOUT_GENERAL:
9753                    // May not be optimal; TODO: reconsider this warning based on other constraints?
9754                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9755                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9756                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9757                    break;
9758
9759                default:
9760                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9761                                    0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9762                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9763                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9764            }
9765
9766            if (attach_first_use[attach_index]) {
9767                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
9768                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9769            }
9770            attach_first_use[attach_index] = false;
9771        }
9772        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9773            switch (subpass.pDepthStencilAttachment->layout) {
9774                case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9775                case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9776                    // These are ideal.
9777                    break;
9778
9779                case VK_IMAGE_LAYOUT_GENERAL:
9780                    // May not be optimal; GENERAL can be better than doing a bunch of transitions.
9781                    // TODO: reconsider this warning based on other constraints?
9783                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9784                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9785                                    "GENERAL layout for depth attachment may not give optimal performance.");
9786                    break;
9787
9788                default:
9789                    // No other layouts are acceptable
9790                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9791                                    0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9792                                    "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9793                                    "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9794                                    string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9795            }
9796
9797            auto attach_index = subpass.pDepthStencilAttachment->attachment;
9798            if (attach_first_use[attach_index]) {
9799                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
9800                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9801            }
9802            attach_first_use[attach_index] = false;
9803        }
9804        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9805            auto attach_index = subpass.pInputAttachments[j].attachment;
9806            if (attach_index == VK_ATTACHMENT_UNUSED) continue;
9807
9808            switch (subpass.pInputAttachments[j].layout) {
9809                case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9810                case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9811                    // These are ideal.
9812                    break;
9813
9814                case VK_IMAGE_LAYOUT_GENERAL:
9815                    // May not be optimal. TODO: reconsider this warning based on other constraints.
9816                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9817                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9818                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9819                    break;
9820
9821                default:
9822                    // No other layouts are acceptable
9823                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9824                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9825                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9826                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9827            }
9828
9829            if (attach_first_use[attach_index]) {
9830                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
9831                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9832            }
9833            attach_first_use[attach_index] = false;
9834        }
9835    }
9836    return skip;
9837}
9838
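// Build the subpass dependency DAG for a render pass: one DAGNode per subpass, with prev/next edges added for each
// VkSubpassDependency. Self-dependencies are recorded in has_self_dependency rather than as edges, and dependencies
// involving VK_SUBPASS_EXTERNAL contribute no edges. For example, a dependency with srcSubpass = 0 and dstSubpass = 1
// yields subpass_to_node[1].prev == {0} and subpass_to_node[0].next == {1}.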
9839static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9840                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9841    bool skip_call = false;
9842    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9843        DAGNode &subpass_node = subpass_to_node[i];
9844        subpass_node.pass = i;
9845    }
9846    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9847        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9848        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9849            if (dependency.srcSubpass == dependency.dstSubpass) {
9850                skip_call |=
9851                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9852                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dst subpasses cannot both be external.");
9853            }
9854        } else if (dependency.srcSubpass > dependency.dstSubpass) {
9855            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9856                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9857                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9858        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9859            has_self_dependency[dependency.srcSubpass] = true;
9860        } else {
9861            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9862            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9863        }
9864    }
9865    return skip_call;
9866}
9867
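// Intercept vkCreateShaderModule: unless shader validation is disabled, run the SPIRV-Tools validator over the module
// before passing the call down the dispatch chain (modules without the SPIR-V magic number are tolerated when
// VK_NV_glsl_shader is enabled), and on success record the module in shaderModuleMap.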
9868VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9869                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
9870    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9871    bool skip_call = false;
9872    spv_result_t spv_valid = SPV_SUCCESS;
9873
9874    if (!GetDisables(dev_data)->shader_validation) {
9875        // Use SPIRV-Tools validator to try and catch any issues with the module itself
9876        spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9877        spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
9878        spv_diagnostic diag = nullptr;
9879
9880        spv_valid = spvValidate(ctx, &binary, &diag);
9881        if (spv_valid != SPV_SUCCESS) {
9882            if (!dev_data->device_extensions.nv_glsl_shader_enabled || (pCreateInfo->pCode[0] == spv::MagicNumber)) {
9883                skip_call |= log_msg(dev_data->report_data,
9884                    spv_valid == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9885                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
9886                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
9887            }
9888        }
9889
9890        spvDiagnosticDestroy(diag);
9891        spvContextDestroy(ctx);
9892
9893        if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
9894    }
9895
9896    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9897
9898    if (res == VK_SUCCESS && !GetDisables(dev_data)->shader_validation) {
9899        std::lock_guard<std::mutex> lock(global_lock);
9900        const auto new_shader_module = (SPV_SUCCESS == spv_valid ? new shader_module(pCreateInfo) : new shader_module());
9901        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new_shader_module);
9902    }
9903    return res;
9904}
9905
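// Flag any attachment reference (other than VK_ATTACHMENT_UNUSED) that indexes past the end of pAttachments.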
9906static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9907    bool skip_call = false;
9908    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9909        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9910                             VALIDATION_ERROR_00325, "DS",
9911                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
9912                             attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
9913    }
9914    return skip_call;
9915}
9916
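// Standard bit trick: x & (x - 1) clears the lowest set bit, so the expression is nonzero only when x has exactly one
// bit set; the leading "x &&" rejects zero.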
9917static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
9918
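// Validate the attachment usage described by each subpass: graphics bind point, legal preserve/resolve/color/depth
// attachment indices, resolve sample-count rules, and consistent sample counts across the subpass's attachments
// (verified by OR-ing the VkSampleCountFlagBits together and requiring the result to be a single bit).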
9919static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9920    bool skip_call = false;
9921    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9922        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9923        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9924            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9925                                 VALIDATION_ERROR_00347, "DS",
9926                                 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s",
9927                                 i, validation_error_map[VALIDATION_ERROR_00347]);
9928        }
9929        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9930            uint32_t attachment = subpass.pPreserveAttachments[j];
9931            if (attachment == VK_ATTACHMENT_UNUSED) {
9932                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9933                                     __LINE__, VALIDATION_ERROR_00356, "DS",
9934                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
9935                                     validation_error_map[VALIDATION_ERROR_00356]);
9936            } else {
9937                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9938            }
9939        }
9940
9941        auto subpass_performs_resolve =
9942            subpass.pResolveAttachments &&
9943            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9944                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9945
9946        unsigned sample_count = 0;
9947
9948        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9949            uint32_t attachment;
9950            if (subpass.pResolveAttachments) {
9951                attachment = subpass.pResolveAttachments[j].attachment;
9952                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9953
9954                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
9955                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9956                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9957                                         __LINE__, VALIDATION_ERROR_00352, "DS",
9958                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
9959                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
9960                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
9961                                         validation_error_map[VALIDATION_ERROR_00352]);
9962                }
9963            }
9964            attachment = subpass.pColorAttachments[j].attachment;
9965            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9966
9967            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9968                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9969
9970                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9971                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9972                                         __LINE__, VALIDATION_ERROR_00351, "DS",
9973                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
9974                                         "which has VK_SAMPLE_COUNT_1_BIT. %s",
9975                                         i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
9976                }
9977            }
9978        }
9979
9980        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9981            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9982            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9983
9984            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9985                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9986            }
9987        }
9988
9989        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9990            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9991            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9992        }
9993
9994        if (sample_count && !IsPowerOfTwo(sample_count)) {
9995            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9996                                 VALIDATION_ERROR_00337, "DS",
9997                                 "CreateRenderPass:  Subpass %u attempts to render to "
9998                                 "attachments with inconsistent sample counts. %s",
9999                                 i, validation_error_map[VALIDATION_ERROR_00337]);
10000        }
10001    }
10002    return skip_call;
10003}
10004
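// Intercept vkCreateRenderPass: validate attachment usage, dependency stage masks, and layouts up front, then on
// success build the subpass DAG and record first-read/first-layout information for each attachment in the new
// RENDER_PASS_STATE.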
10005VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
10006                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
10007    bool skip_call = false;
10008    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10009
10010    std::unique_lock<std::mutex> lock(global_lock);
10011    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
10012    //       ValidateLayouts.
10013    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
10014    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
10015        skip_call |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
10016                                                  VALIDATION_ERROR_00368, VALIDATION_ERROR_00370);
10017        skip_call |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
10018                                                  VALIDATION_ERROR_00369, VALIDATION_ERROR_00371);
10019    }
10020    if (!skip_call) {
10021        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
10022    }
10023    lock.unlock();
10024
10025    if (skip_call) {
10026        return VK_ERROR_VALIDATION_FAILED_EXT;
10027    }
10028
10029    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
10030
10031    if (VK_SUCCESS == result) {
10032        lock.lock();
10033
10034        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
10035        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
10036        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
10037
10038        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
10039        render_pass->renderPass = *pRenderPass;
10040        render_pass->hasSelfDependency = has_self_dependency;
10041        render_pass->subpassToNode = subpass_to_node;
10042
10043        // TODO: Maybe fill list and then copy instead of locking
10044        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
10045        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
10046        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10047            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10048            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10049                uint32_t attachment = subpass.pColorAttachments[j].attachment;
10050                if (!attachment_first_read.count(attachment)) {
10051                    attachment_first_read.insert(std::make_pair(attachment, false));
10052                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
10053                }
10054            }
10055            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10056                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10057                if (!attachment_first_read.count(attachment)) {
10058                    attachment_first_read.insert(std::make_pair(attachment, false));
10059                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
10060                }
10061            }
10062            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10063                uint32_t attachment = subpass.pInputAttachments[j].attachment;
10064                if (!attachment_first_read.count(attachment)) {
10065                    attachment_first_read.insert(std::make_pair(attachment, true));
10066                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10067                }
10068            }
10069        }
10070
10071        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
10072    }
10073    return result;
10074}
10075
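// Report error_code if cmd_name is recorded into anything other than a primary command buffer.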
10076static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name,
10077                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
10078    bool skip_call = false;
10079    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10080        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10081                             error_code, "DS", "Cannot execute command %s on a secondary command buffer. %s", cmd_name.c_str(),
10082                             validation_error_map[error_code]);
10083    }
10084    return skip_call;
10085}
10086
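// Check that the renderArea requested by vkCmdBeginRenderPass lies entirely within the bounds of the target
// framebuffer.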
10087static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10088    bool skip_call = false;
10089    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
10090        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
10091    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10092        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10093        pRenderPassBegin->renderArea.offset.y < 0 ||
10094        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10095        skip_call |= static_cast<bool>(log_msg(
10096            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10097            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10098            "Cannot execute a render pass with renderArea not within the bounds of the "
10099            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10100            "height %d.",
10101            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10102            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10103    }
10104    return skip_call;
10105}
10106
10107// For a stencil-only format, only the stencil[Load|Store]Op is checked; for a depth-only or color format, only the
10108// [load|store]Op is checked; for a combined depth/stencil format, either op can satisfy the check.
10109// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
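// For example (illustrative values): with format == VK_FORMAT_D24_UNORM_S8_UINT, color_depth_op == LOAD and
// stencil_op == CLEAR, querying op == VK_ATTACHMENT_LOAD_OP_CLEAR returns true, since a combined depth/stencil
// format has both of its ops consulted.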
10110template <typename T>
10111static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10112    if (color_depth_op != op && stencil_op != op) {
10113        return false;
10114    }
10115    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10116    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10117
10118    return ((check_color_depth_load_op && (color_depth_op == op)) ||
10119            (check_stencil_load_op && (stencil_op == op)));
10120}
10121
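// Intercept vkCmdBeginRenderPass: queue per-attachment memory-validity work based on each attachment's load ops,
// check pClearValues coverage, render area bounds, layouts, and subpass dependencies, then record the render pass
// state on the command buffer and transition attachments into the first subpass's layouts.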
10122VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
10123                                              VkSubpassContents contents) {
10124    bool skip_call = false;
10125    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10126    std::unique_lock<std::mutex> lock(global_lock);
10127    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10128    auto render_pass_state = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
10129    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10130    if (cb_node) {
10131        if (render_pass_state) {
10132            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
10133            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
10134            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
10135                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10136                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
10137                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
10138                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10139                    clear_op_size = static_cast<uint32_t>(i) + 1;
10140                    std::function<bool()> function = [=]() {
10141                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
10142                        return false;
10143                    };
10144                    cb_node->validate_functions.push_back(function);
10145                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10146                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10147                    std::function<bool()> function = [=]() {
10148                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
10149                        return false;
10150                    };
10151                    cb_node->validate_functions.push_back(function);
10152                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10153                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
10154                    std::function<bool()> function = [=]() {
10155                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
10156                                                          "vkCmdBeginRenderPass()");
10157                    };
10158                    cb_node->validate_functions.push_back(function);
10159                }
10160                if (render_pass_state->attachment_first_read[i]) {
10161                    std::function<bool()> function = [=]() {
10162                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
10163                                                          "vkCmdBeginRenderPass()");
10164                    };
10165                    cb_node->validate_functions.push_back(function);
10166                }
10167            }
10168            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10169                skip_call |= log_msg(
10170                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10171                    reinterpret_cast<uint64_t &>(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
10172                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10173                    "be at least %u entries in the pClearValues array to account for the highest indexed attachment in "
10174                    "renderPass 0x%" PRIx64
10175                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is attachment %u. Note that the pClearValues array "
10176                    "is indexed by attachment number, so even if some pClearValues entries between 0 and %u correspond to "
10177                    "attachments that aren't cleared, they will be ignored. %s",
10178                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(render_pass_state->renderPass),
10179                    clear_op_size - 1, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
10180            }
10181            if (clear_op_size < pRenderPassBegin->clearValueCount) {
10182                skip_call |= log_msg(
10183                    dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10184                    reinterpret_cast<uint64_t &>(render_pass_state->renderPass), __LINE__,
10185                    DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
10186                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only the first %u "
10187                    "entries in pClearValues array are used. The highest index of any attachment in renderPass 0x%" PRIx64
10188                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.",
10189                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(render_pass_state->renderPass),
10190                    clear_op_size - 1);
10191            }
10192            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10193            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
10194                                                               getFramebufferState(dev_data, pRenderPassBegin->framebuffer));
10195            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
10196            skip_call |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
10197            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass", VALIDATION_ERROR_00441);
10198            skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10199            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS);
10200            cb_node->activeRenderPass = render_pass_state;
10201            // This is a shallow copy as that is all that is needed for now
10202            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10203            cb_node->activeSubpass = 0;
10204            cb_node->activeSubpassContents = contents;
10205            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10206            // Connect this framebuffer and its children to this cmdBuffer
10207            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10208            // transition attachments to the correct layouts for the first subpass
10209            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass, framebuffer);
10210        }
10211    }
10212    lock.unlock();
10213    if (!skip_call) {
10214        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10215    }
10216}
10217
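// Intercept vkCmdNextSubpass: validate that we are inside a render pass on a primary command buffer and not already
// at the final subpass, then advance activeSubpass and transition attachments into the new subpass's layouts.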
10218VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10219    bool skip_call = false;
10220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10221    std::unique_lock<std::mutex> lock(global_lock);
10222    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10223    if (pCB) {
10224        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass", VALIDATION_ERROR_00459);
10225        skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10226        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS);
10227        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);
10228
10229        if (pCB->activeRenderPass &&  // guard: outsideRenderPass() above only reports the error, it does not return
10230            (pCB->activeSubpass == pCB->activeRenderPass->createInfo.subpassCount - 1)) {
10231            skip_call |= log_msg(
10232                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10233                reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00453, "DS",
10234                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s", validation_error_map[VALIDATION_ERROR_00453]);
10235        }
10236    }
10237    lock.unlock();
10238
10239    if (skip_call) return;
10240
10241    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
10242
10243    if (pCB) {
10244        lock.lock();
10245        pCB->activeSubpass++;
10246        pCB->activeSubpassContents = contents;
10247        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass,
10248                                 getFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
10249    }
10250}
10251
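// Intercept vkCmdEndRenderPass: queue per-attachment memory-validity work based on each attachment's store ops,
// validate that the final subpass has been reached, then transition attachments into their final layouts and clear
// the active render pass state from the command buffer.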
10252VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10253    bool skip_call = false;
10254    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10255    std::unique_lock<std::mutex> lock(global_lock);
10256    auto pCB = getCBNode(dev_data, commandBuffer);
10257    FRAMEBUFFER_STATE *framebuffer = NULL;
10258    if (pCB) {
10259        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
10260        framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
10261        if (rp_state) {
10262            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
10263                skip_call |= log_msg(
10264                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10265                    reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00460, "DS",
10266                    "vkCmdEndRenderPass(): Called before reaching final subpass. %s", validation_error_map[VALIDATION_ERROR_00460]);
10267            }
10268
10269            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
10270                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10271                auto pAttachment = &rp_state->createInfo.pAttachments[i];
10272                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
10273                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
10274                    std::function<bool()> function = [=]() {
10275                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
10276                        return false;
10277                    };
10278                    pCB->validate_functions.push_back(function);
10279                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10280                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
10281                    std::function<bool()> function = [=]() {
10282                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
10283                        return false;
10284                    };
10285                    pCB->validate_functions.push_back(function);
10286                }
10287            }
10288        }
10289        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass()", VALIDATION_ERROR_00464);
10290        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass", VALIDATION_ERROR_00465);
10291        skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10292        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS);
10293    }
10294    lock.unlock();
10295
10296    if (skip_call) return;
10297
10298    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
10299
10300    if (pCB) {
10301        lock.lock();
10302        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
10303        pCB->activeRenderPass = nullptr;
10304        pCB->activeSubpass = 0;
10305        pCB->activeFramebuffer = VK_NULL_HANDLE;
10306    }
10307}
10308
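// Emit the common "render passes are not compatible" message for a mismatched attachment pair.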
10309static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10310                                        uint32_t secondaryAttach, const char *msg) {
10311    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10312                   VALIDATION_ERROR_02059, "DS", "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
10313                                                 " which has a render pass "
10314                                                 "that is not compatible with the Primary Cmd Buffer's current render pass. "
10315                                                 "Attachment %u is not compatible with %u: %s. %s",
10316                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
10317                   validation_error_map[VALIDATION_ERROR_02059]);
10318}
10319
10320static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10321                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10322                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10323                                            uint32_t secondaryAttach, bool is_multi) {
10324    bool skip_call = false;
10325    if (primaryPassCI->attachmentCount <= primaryAttach) {
10326        primaryAttach = VK_ATTACHMENT_UNUSED;
10327    }
10328    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10329        secondaryAttach = VK_ATTACHMENT_UNUSED;
10330    }
10331    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10332        return skip_call;
10333    }
10334    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10335        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10336                                                 "The first is unused while the second is not.");
10337        return skip_call;
10338    }
10339    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10340        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10341                                                 "The second is unused while the first is not.");
10342        return skip_call;
10343    }
10344    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10345        skip_call |=
10346            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10347    }
10348    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10349        skip_call |=
10350            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10351    }
10352    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10353        skip_call |=
10354            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10355    }
10356    return skip_call;
10357}
10358
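// Compare one subpass of the primary and secondary render passes: corresponding input, color, resolve, and
// depth/stencil attachment references must all be compatible, with missing references treated as VK_ATTACHMENT_UNUSED.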
10359static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10360                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10361                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10362    bool skip_call = false;
10363    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10364    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10365    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10366    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10367        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10368        if (i < primary_desc.inputAttachmentCount) {
10369            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10370        }
10371        if (i < secondary_desc.inputAttachmentCount) {
10372            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10373        }
10374        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10375                                                     secondaryPassCI, secondary_input_attach, is_multi);
10376    }
10377    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10378    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10379        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10380        if (i < primary_desc.colorAttachmentCount) {
10381            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10382        }
10383        if (i < secondary_desc.colorAttachmentCount) {
10384            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10385        }
10386        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10387                                                     secondaryPassCI, secondary_color_attach, is_multi);
10388        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10389        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10390            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10391        }
10392        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10393            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10394        }
10395        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10396                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10397    }
10398    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10399    if (primary_desc.pDepthStencilAttachment) {
10400        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10401    }
10402    if (secondary_desc.pDepthStencilAttachment) {
10403        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10404    }
10405    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10406                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10407    return skip_call;
10408}
10409
10410// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10411//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10412//  will then feed into this function
10413static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10414                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10415                                            VkRenderPassCreateInfo const *secondaryPassCI) {
10416    bool skip_call = false;
10417
10418    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10419        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10420                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10421                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10422                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
10423                             " that has a subpassCount of %u.",
10424                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10425                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10426    } else {
10427        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10428            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10429                                                      primaryPassCI->subpassCount > 1);
10430        }
10431    }
10432    return skip_call;
10433}
10434
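// If the secondary command buffer inherits a framebuffer, it must match the primary's active framebuffer, and the
// inherited render pass must be compatible with the render pass the framebuffer was created against.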
10435static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10436                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10437    bool skip_call = false;
10438    if (!pSubCB->beginInfo.pInheritanceInfo) {
10439        return skip_call;
10440    }
10441    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10442    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10443    if (secondary_fb != VK_NULL_HANDLE) {
10444        if (primary_fb != secondary_fb) {
10445            skip_call |= log_msg(
10446                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10447                VALIDATION_ERROR_02060, "DS",
10448                "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64 " which has a framebuffer 0x%" PRIx64
10449                " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
10450                reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10451                reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
10452        }
10453        auto fb = getFramebufferState(dev_data, secondary_fb);
10454        if (!fb) {
10455            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10456                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10457                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10458                                 "which has invalid framebuffer 0x%" PRIx64 ".",
10459                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10460            return skip_call;
10461        }
10462        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10463        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
10464            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10465                                                         cb_renderpass->createInfo.ptr());
10466        }
10467    }
10468    return skip_call;
10469}
10470
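// Check query and command-pool constraints on a secondary command buffer executed from pCB: the secondary's inherited
// pipelineStatistics may only contain bits enabled on the pool of any active pipeline-statistics query, it must not
// have started a query of a type already active on the primary, and both buffers must come from pools created with
// the same queue family.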
10471static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10472    bool skip_call = false;
10473    unordered_set<int> activeTypes;
10474    for (auto queryObject : pCB->activeQueries) {
10475        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10476        if (queryPoolData != dev_data->queryPoolMap.end()) {
10477            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10478                pSubCB->beginInfo.pInheritanceInfo) {
10479                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10480                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10481                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10482                                         __LINE__, VALIDATION_ERROR_02065, "DS",
10483                                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10484                                         "which has invalid active query pool 0x%" PRIx64
10485                                         ". Pipeline statistics are being queried, so the secondary command buffer's "
10486                                         "inherited pipelineStatistics must only contain bits enabled on the queryPool. %s",
10487                                         pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
10488                                         validation_error_map[VALIDATION_ERROR_02065]);
10489                }
10490            }
10491            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10492        }
10493    }
10494    for (auto queryObject : pSubCB->startedQueries) {
10495        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10496        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10497            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10498                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10499                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10500                                 "which has an active query pool 0x%" PRIx64
10501                                 " of type %d, but a query of that type has been started on "
10502                                 "secondary Cmd Buffer 0x%p.",
10503                                 pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
10504                                 queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
10505        }
10506    }
10507
10508    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10509    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10510    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10511        skip_call |=
10512            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10513                    reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10514                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
10515                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
10516                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
10517    }
10518
10519    return skip_call;
10520}
10521
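// Intercept vkCmdExecuteCommands: every element of pCommandBuffers must be a secondary command buffer; when executed
// inside a render pass each must have been begun with RENDER_PASS_CONTINUE_BIT against a render pass (and, if given,
// a framebuffer) compatible with the primary's, and secondary buffers already in flight must have been recorded with
// SIMULTANEOUS_USE_BIT.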
10522VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
10523                                              const VkCommandBuffer *pCommandBuffers) {
10524    bool skip_call = false;
10525    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10526    std::unique_lock<std::mutex> lock(global_lock);
10527    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10528    if (pCB) {
10529        GLOBAL_CB_NODE *pSubCB = NULL;
10530        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10531            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10532            assert(pSubCB);
10533            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10534                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10535                                     __LINE__, VALIDATION_ERROR_00153, "DS",
10536                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10537                                     "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
10538                                     pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]);
10539            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10540                auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10541                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10542                    skip_call |= log_msg(
10543                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10544                        (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS",
10545                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10546                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
10547                        pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
10548                        validation_error_map[VALIDATION_ERROR_02057]);
10549                } else {
10550                    // If the continue bit is set, make sure the render pass is compatible with the parent command buffer's pass
10551                    if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
10552                        skip_call |=
10553                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
10554                                                            pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
10555                    }
10556                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10557                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10558                }
10559                string errorString = "";
10560                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10561                if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
10562                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
10563                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
10564                    skip_call |= log_msg(
10565                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10566                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10567                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10568                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10569                        pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
10570                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10571                }
10572            }
10573            // TODO(mlentine): Move more logic into this method
10574            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10575            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
10576            // Secondary cmdBuffers are considered pending execution from the
10577            // moment they are recorded
10578            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10579                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10580                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10581                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__,
10582                                         VALIDATION_ERROR_00154, "DS",
10583                                         "Attempt to simultaneously execute command buffer 0x%p"
10584                                         " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
10585                                         pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
10586                }
10587                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10588                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10589                    skip_call |= log_msg(
10590                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10591                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10592                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
10593                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10594                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10595                        "set, even though it does.",
10596                        pCommandBuffers[i], pCB->commandBuffer);
10597                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10598                }
10599            }
10600            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
10601                skip_call |=
10602                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10603                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_02062, "DS",
10604                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10605                            "(0x%p) cannot be submitted with a query in "
10606                            "flight when inherited queries are not "
10607                            "supported on this device. %s",
10608                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
10609            }
10610            // Propagate layout transitions to the primary cmd buffer
10611            for (auto ilm_entry : pSubCB->imageLayoutMap) {
10612                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
10613            }
10614            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10615            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10616            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10617            for (auto &function : pSubCB->queryUpdates) {
10618                pCB->queryUpdates.push_back(function);
10619            }
10620        }
10621        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands", VALIDATION_ERROR_00163);
10622        skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10623        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
10624    }
10625    lock.unlock();
10626    if (!skip_call) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10627}
10628
10629// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10630static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10631                                    VkDeviceSize end_offset) {
10632    bool skip_call = false;
10633    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10634    // Iterate over all bound image ranges and verify that for any that overlap the
10635    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10636    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10637    for (auto image_handle : mem_info->bound_images) {
10638        auto img_it = mem_info->bound_ranges.find(image_handle);
10639        if (img_it != mem_info->bound_ranges.end()) {
10640            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10641                std::vector<VkImageLayout> layouts;
10642                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10643                    for (auto layout : layouts) {
10644                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10645                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10646                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10647                                                 "Cannot map an image with layout %s. Only "
10648                                                 "GENERAL or PREINITIALIZED are supported.",
10649                                                 string_VkImageLayout(layout));
10650                        }
10651                    }
10652                }
10653            }
10654        }
10655    }
10656    return skip_call;
10657}
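// MapMemory: in addition to the image-layout check above, verifies the memory
// type is host-visible and that the requested range is valid, then records the
// mapped range so later flush/invalidate calls can be validated against it.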
10658
10659VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
10660                                         void **ppData) {
10661    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10662
10663    bool skip_call = false;
10664    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10665    std::unique_lock<std::mutex> lock(global_lock);
10666    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10667    if (mem_info) {
10668        // TODO : This could be more fine-grained to track just the region that is valid
10669        mem_info->global_valid = true;
10670        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10671        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10672        // TODO : Do we need to create new "bound_range" for the mapped range?
10673        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10674        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10675             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10676            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10677                                 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM",
10678                                 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
10679                                 (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
10680        }
10681    }
10682    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10683    lock.unlock();
10684
10685    if (!skip_call) {
10686        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
10687        if (VK_SUCCESS == result) {
10688            lock.lock();
10689            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10690            storeMemRanges(dev_data, mem, offset, size);
10691            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10692            lock.unlock();
10693        }
10694    }
10695    return result;
10696}
10697
10698VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10699    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10700    bool skip_call = false;
10701
10702    std::unique_lock<std::mutex> lock(global_lock);
10703    skip_call |= deleteMemRanges(dev_data, mem);
10704    lock.unlock();
10705    if (!skip_call) {
10706        dev_data->dispatch_table.UnmapMemory(device, mem);
10707    }
10708}
10709
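// Each VkMappedMemoryRange passed to flush/invalidate must lie inside the
// region previously mapped with vkMapMemory: a VK_WHOLE_SIZE range only needs
// its offset inside the mapping, while a sized range must fit entirely.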
10710static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
10711                                   const VkMappedMemoryRange *pMemRanges) {
10712    bool skip = false;
10713    for (uint32_t i = 0; i < memRangeCount; ++i) {
10714        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10715        if (mem_info) {
10716            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
10717                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10718                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10719                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10720                                    VALIDATION_ERROR_00643, "MEM", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
10721                                                                   ") is less than Memory Object's offset "
10722                                                                   "(" PRINTF_SIZE_T_SPECIFIER "). %s",
10723                                    funcName, static_cast<size_t>(pMemRanges[i].offset),
10724                                    static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
10725                }
10726            } else {
10727                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10728                                              ? mem_info->alloc_info.allocationSize
10729                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
10730                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
10731                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
10732                    skip |=
10733                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10734                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
10735                                "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
10736                                ") exceed the Memory Object's upper-bound "
10737                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
10738                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10739                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
10740                                validation_error_map[VALIDATION_ERROR_00642]);
10741                }
10742            }
10743        }
10744    }
10745    return skip;
10746}
10747
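// Shadow copies of non-coherent mappings are laid out as
//   [ pad | user data (size bytes) | pad ]   where pad = shadow_pad_size.
// Both pads are expected to still hold NoncoherentMemoryFillValue; a modified
// byte in the leading pad indicates an underflow write and one in the trailing
// pad an overflow write. After the checks, the user-data region is copied into
// the driver's actual mapping.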
10748static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
10749                                                     const VkMappedMemoryRange *mem_ranges) {
10750    bool skip = false;
10751    for (uint32_t i = 0; i < mem_range_count; ++i) {
10752        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
10753        if (mem_info) {
10754            if (mem_info->shadow_copy) {
10755                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10756                                        ? mem_info->mem_range.size
10757                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
10758                char *data = static_cast<char *>(mem_info->shadow_copy);
10759                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10760                    if (data[j] != NoncoherentMemoryFillValue) {
10761                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10762                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
10763                                        MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
10764                                        (uint64_t)mem_ranges[i].memory);
10765                    }
10766                }
10767                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10768                    if (data[j] != NoncoherentMemoryFillValue) {
10769                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10770                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
10771                                        MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
10772                                        (uint64_t)mem_ranges[i].memory);
10773                    }
10774                }
10775                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10776            }
10777        }
10778    }
10779    return skip;
10780}
10781
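// Inverse of the flush path above: after the driver services an invalidate,
// refresh the user-data region of the shadow copy from the driver's mapping.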
10782static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
10783    for (uint32_t i = 0; i < mem_range_count; ++i) {
10784        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
10785        if (mem_info && mem_info->shadow_copy) {
10786            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10787                                    ? mem_info->mem_range.size
10788                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
10789            char *data = static_cast<char *>(mem_info->shadow_copy);
10790            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
10791        }
10792    }
10793}
10794
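// Both the offset and any explicit (non-VK_WHOLE_SIZE) size of a mapped memory
// range must be multiples of nonCoherentAtomSize. Hypothetical example: with
// nonCoherentAtomSize = 64, an offset of 96 fails (96 % 64 != 0) while 128 passes.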
10795static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
10796                                                  const VkMappedMemoryRange *mem_ranges) {
10797    bool skip = false;
10798    for (uint32_t i = 0; i < mem_range_count; ++i) {
10799        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
10800        if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) {
10801            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
10802                            __LINE__, VALIDATION_ERROR_00644, "MEM",
10803                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
10804                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
10805                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
10806        }
10807        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) {
10808            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
10809                            __LINE__, VALIDATION_ERROR_00645, "MEM",
10810                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
10811                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
10812                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
10813        }
10814    }
10815    return skip;
10816}
10817
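// Flush path: check the shadow-copy guard bands and push app writes to the
// driver, then verify the ranges are currently mapped.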
10818static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
10819                                                   const VkMappedMemoryRange *mem_ranges) {
10820    bool skip = false;
10821    std::lock_guard<std::mutex> lock(global_lock);
10822    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
10823    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
10824    return skip;
10825}
10826
10827VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
10828                                                       const VkMappedMemoryRange *pMemRanges) {
10829    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10830    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10831
10832    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
10833        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10834    }
10835    return result;
10836}
10837
10838static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
10839                                                        const VkMappedMemoryRange *mem_ranges) {
10840    bool skip = false;
10841    std::lock_guard<std::mutex> lock(global_lock);
10842    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
10843    return skip;
10844}
10845
10846static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
10847                                                       const VkMappedMemoryRange *mem_ranges) {
10848    std::lock_guard<std::mutex> lock(global_lock);
10849    // Update our shadow copy with modified driver data
10850    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
10851}
10852
10853VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
10854                                                            const VkMappedMemoryRange *pMemRanges) {
10855    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10856    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10857
10858    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
10859        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10860        if (result == VK_SUCCESS) {
10861            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
10862        }
10863    }
10864    return result;
10865}
10866
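// BindImageMemory: records the image-to-memory binding, warns if
// vkGetImageMemoryRequirements() was never called (and calls it to obtain the
// requirements), and validates the bound range and memory type.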
10867VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10868    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10869    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10870    bool skip_call = false;
10871    std::unique_lock<std::mutex> lock(global_lock);
10872    auto image_state = getImageState(dev_data, image);
10873    if (image_state) {
10874        // Track objects tied to memory
10875        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10876        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10877        if (!image_state->memory_requirements_checked) {
10878            // There's no explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
10879            //  BindImageMemory, but it's implied: the memory being bound must conform to the VkMemoryRequirements
10880            //  returned by vkGetImageMemoryRequirements()
10881            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10882                                 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
10883                                 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
10884                                 " but vkGetImageMemoryRequirements() has not been called on that image.",
10885                                 image_handle);
10886            // Make the call for them so we can verify the state
10887            lock.unlock();
10888            dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements);
10889            lock.lock();
10890        }
10891
10892        // Track and validate bound memory range information
10893        auto mem_info = getMemObjInfo(dev_data, mem);
10894        if (mem_info) {
10895            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
10896                                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10897            skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
10898                                             VALIDATION_ERROR_00806);
10899        }
10900
10901        lock.unlock();
10902        if (!skip_call) {
10903            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
10904            lock.lock();
10905            image_state->binding.mem = mem;
10906            image_state->binding.offset = memoryOffset;
10907            image_state->binding.size = image_state->requirements.size;
10908            lock.unlock();
10909        }
10910    } else {
10911        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10912                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10913                "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been deleted?",
10914                reinterpret_cast<const uint64_t &>(image));
10915    }
10916    return result;
10917}
10918
10919VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10920    bool skip_call = false;
10921    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10922    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10923    std::unique_lock<std::mutex> lock(global_lock);
10924    auto event_state = getEventNode(dev_data, event);
10925    if (event_state) {
10926        event_state->needsSignaled = false;
10927        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10928        if (event_state->write_in_use) {
10929            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10930                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10931                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
10932                                 reinterpret_cast<const uint64_t &>(event));
10933        }
10934    }
10935    lock.unlock();
10936    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10937    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10938    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10939    for (auto queue_data : dev_data->queueMap) {
10940        auto event_entry = queue_data.second.eventToStageMap.find(event);
10941        if (event_entry != queue_data.second.eventToStageMap.end()) {
10942            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10943        }
10944    }
10945    if (!skip_call) result = dev_data->dispatch_table.SetEvent(device, event);
10946    return result;
10947}
10948
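// QueueBindSparse: validates the fence, records each buffer/opaque-image/image
// sparse binding, checks wait/signal semaphore state, and enqueues a
// submission entry so the fence and semaphores retire in order.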
10949VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10950                                               VkFence fence) {
10951    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10952    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10953    bool skip_call = false;
10954    std::unique_lock<std::mutex> lock(global_lock);
10955    auto pFence = getFenceNode(dev_data, fence);
10956    auto pQueue = getQueueState(dev_data, queue);
10957
10958    // First verify that fence is not in use
10959    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
10960
10961    if (pFence) {
10962        SubmitFence(pQueue, pFence, bindInfoCount);
10963    }
10964
10965    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10966        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10967        // Track objects tied to memory
10968        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10969            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10970                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
10971                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10972                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10973                                        "vkQueueBindSparse"))
10974                    skip_call = true;
10975            }
10976        }
10977        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10978            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10979                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
10980                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10981                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10982                                        "vkQueueBindSparse"))
10983                    skip_call = true;
10984            }
10985        }
10986        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10987            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10988                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
10989                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
10990                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
10991                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
10992                                        (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10993                                        "vkQueueBindSparse"))
10994                    skip_call = true;
10995            }
10996        }
10997
10998        std::vector<SEMAPHORE_WAIT> semaphore_waits;
10999        std::vector<VkSemaphore> semaphore_signals;
11000        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11001            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11002            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11003            if (pSemaphore) {
11004                if (pSemaphore->signaled) {
11005                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11006                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11007                        pSemaphore->in_use.fetch_add(1);
11008                    }
11009                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11010                    pSemaphore->signaled = false;
11011                } else {
11012                    skip_call |= log_msg(
11013                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11014                        reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11015                        "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11016                        queue, reinterpret_cast<const uint64_t &>(semaphore));
11017                }
11018            }
11019        }
11020        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11021            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11022            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11023            if (pSemaphore) {
11024                if (pSemaphore->signaled) {
11025                    skip_call |=
11026                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11027                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11028                                "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
11029                                ", but that semaphore is already signaled.",
11030                                queue, reinterpret_cast<const uint64_t &>(semaphore));
11031                } else {
11032                    pSemaphore->signaler.first = queue;
11033                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11034                    pSemaphore->signaled = true;
11035                    pSemaphore->in_use.fetch_add(1);
11036                    semaphore_signals.push_back(semaphore);
11037                }
11038            }
11039        }
11040
11041        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
11042                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11043    }
11044
11045    if (pFence && !bindInfoCount) {
11046        // No work to do, just dropping a fence in the queue by itself.
11047        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
11048                                         fence);
11049    }
11050
11051    lock.unlock();
11052
11053    if (!skip_call) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11054
11055    return result;
11056}
11057
11058VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11059                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11060    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11061    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11062    if (result == VK_SUCCESS) {
11063        std::lock_guard<std::mutex> lock(global_lock);
11064        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
11065        sNode->signaler.first = VK_NULL_HANDLE;
11066        sNode->signaler.second = 0;
11067        sNode->signaled = false;
11068    }
11069    return result;
11070}
11071
11072VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
11073                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11074    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11075    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11076    if (result == VK_SUCCESS) {
11077        std::lock_guard<std::mutex> lock(global_lock);
11078        dev_data->eventMap[*pEvent].needsSignaled = false;
11079        dev_data->eventMap[*pEvent].write_in_use = 0;
11080        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11081    }
11082    return result;
11083}
11084
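// Validates a swapchain create info against the data previously queried from
// the surface: image count, extent, transform, composite alpha, array layers
// and usage against the surface capabilities, plus format/color space and
// present mode against the surface's supported lists.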
11085static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
11086                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
11087                                              SWAPCHAIN_NODE *old_swapchain_state) {
11088    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11089
11090    // TODO: revisit this. some of these rules are being relaxed.
11091    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11092        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11093                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11094                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
11095            return true;
11096    }
11097    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11098        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11099                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11100                    "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
11101            return true;
11102    }
11103    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11104    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
11105        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11106                    reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
11107                    "%s: surface capabilities not retrieved for this physical device", func_name))
11108            return true;
11109    } else {  // have valid capabilities
11110        auto &capabilities = physical_device_state->surfaceCapabilities;
11111        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
11112        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
11113            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11114                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
11115                        "%s called with minImageCount = %d, which is outside the bounds returned "
11116                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
11117                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
11118                        validation_error_map[VALIDATION_ERROR_02331]))
11119                return true;
11120        }
11121
11122        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
11123            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11124                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
11125                        "%s called with minImageCount = %d, which is outside the bounds returned "
11126                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
11127                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
11128                        validation_error_map[VALIDATION_ERROR_02332]))
11129                return true;
11130        }
11131
11132        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
11133        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
11134            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
11135             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
11136             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
11137             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
11138            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11139                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
11140                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
11141                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
11142                        "maxImageExtent = (%d,%d). %s",
11143                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11144                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
11145                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
11146                        validation_error_map[VALIDATION_ERROR_02334]))
11147                return true;
11148        }
11149        if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
11150            ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
11151             (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
11152            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11153                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
11154                        "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
11155                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
11156                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11157                        capabilities.currentExtent.width, capabilities.currentExtent.height,
11158                        validation_error_map[VALIDATION_ERROR_02334]))
11159                return true;
11160        }
11161        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
11162        // VkSurfaceCapabilitiesKHR::supportedTransforms.
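        // (x & (x - 1)) clears the lowest set bit, so it is nonzero exactly when
        // x has more than one bit set (e.g. 0b0110 & 0b0101 == 0b0100).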
11163        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
11164            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
11165            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11166            // it up a little at a time, and then log it:
11167            std::string errorString = "";
11168            char str[1024];
11169            // Here's the first part of the message:
11170            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
11171                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
11172            errorString += str;
11173            for (int i = 0; i < 32; i++) {
11174                // Build up the rest of the message:
11175                if ((1 << i) & capabilities.supportedTransforms) {
11176                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
11177                    sprintf(str, "    %s\n", newStr);
11178                    errorString += str;
11179                }
11180            }
11181            // Log the message that we've built up:
11182            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11183                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
11184                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
11185                return true;
11186        }
11187
11188        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
11189        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
11190        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
11191            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
11192            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11193            // it up a little at a time, and then log it:
11194            std::string errorString = "";
11195            char str[1024];
11196            // Here's the first part of the message:
11197            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
11198                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
11199            errorString += str;
11200            for (int i = 0; i < 32; i++) {
11201                // Build up the rest of the message:
11202                if ((1 << i) & capabilities.supportedCompositeAlpha) {
11203                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
11204                    sprintf(str, "    %s\n", newStr);
11205                    errorString += str;
11206                }
11207            }
11208            // Log the message that we've built up:
11209            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11210                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
11211                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
11212                return true;
11213        }
11214        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
11215        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
11216            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11217                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
11218                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
11219                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
11220                        validation_error_map[VALIDATION_ERROR_02335]))
11221                return true;
11222        }
11223        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
11224        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
11225            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11226                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
11227                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
11228                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
11229                        validation_error_map[VALIDATION_ERROR_02336]))
11230                return true;
11231        }
11232    }
11233
11234    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
11235    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
11236        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11237                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
11238                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
11239            return true;
11240    } else {
11241        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
11242        bool foundFormat = false;
11243        bool foundColorSpace = false;
11244        bool foundMatch = false;
11245        for (auto const &format : physical_device_state->surface_formats) {
11246            if (pCreateInfo->imageFormat == format.format) {
11247                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
11248                foundFormat = true;
11249                if (pCreateInfo->imageColorSpace == format.colorSpace) {
11250                    foundMatch = true;
11251                    break;
11252                }
11253            } else {
11254                if (pCreateInfo->imageColorSpace == format.colorSpace) {
11255                    foundColorSpace = true;
11256                }
11257            }
11258        }
11259        if (!foundMatch) {
11260            if (!foundFormat) {
11261                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11262                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
11263                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
11264                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
11265                    return true;
11266            }
11267            if (!foundColorSpace) {
11268                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11269                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
11270                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
11271                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
11272                    return true;
11273            }
11274        }
11275    }
11276
11277    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
11278    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
11279        // FIFO is required to always be supported
11280        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
11281            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11282                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
11283                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
11284                return true;
11285        }
11286    } else {
11287        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
11288        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
11289                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
11290        if (!foundMatch) {
11291            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11292                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
11293                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
11294                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
11295                return true;
11296        }
11297    }
11298
11299    return false;
11300}
11301
11302static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
11303                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
11304                                             SWAPCHAIN_NODE *old_swapchain_state) {
11305    if (VK_SUCCESS == result) {
11306        std::lock_guard<std::mutex> lock(global_lock);
11307        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
11308        surface_state->swapchain = swapchain_state.get();
11309        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
11310    } else {
11311        surface_state->swapchain = nullptr;
11312    }
11313    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
11314    if (old_swapchain_state) {
11315        old_swapchain_state->replaced = true;
11316    }
11317    surface_state->old_swapchain = old_swapchain_state;
11318    return;
11319}
11320
11321VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11322                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
11323    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11324    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
11325    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
11326
11327    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
11328        return VK_ERROR_VALIDATION_FAILED_EXT;
11329    }
11330
11331    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11332
11333    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
11334
11335    return result;
11336}
11337
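// DestroySwapchainKHR: drops the layout/subresource tracking and memory
// bindings for every image the swapchain owned, unlinks the swapchain from its
// surface, and erases the swapchain node.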
11338VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11339    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11340    bool skip_call = false;
11341
11342    std::unique_lock<std::mutex> lock(global_lock);
11343    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11344    if (swapchain_data) {
11345        if (swapchain_data->images.size() > 0) {
11346            for (auto swapchain_image : swapchain_data->images) {
11347                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11348                if (image_sub != dev_data->imageSubresourceMap.end()) {
11349                    for (auto imgsubpair : image_sub->second) {
11350                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11351                        if (image_item != dev_data->imageLayoutMap.end()) {
11352                            dev_data->imageLayoutMap.erase(image_item);
11353                        }
11354                    }
11355                    dev_data->imageSubresourceMap.erase(image_sub);
11356                }
11357                skip_call |=
11358                    ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11359                dev_data->imageMap.erase(swapchain_image);
11360            }
11361        }
11362
11363        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11364        if (surface_state) {
11365            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
11366            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
11367        }
11368
11369        dev_data->device_extensions.swapchainMap.erase(swapchain);
11370    }
11371    lock.unlock();
11372    if (!skip_call) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11373}
11374
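// GetSwapchainImagesKHR: on the image-returning call, creates an IMAGE_STATE
// for each swapchain image from a synthesized VkImageCreateInfo and marks its
// memory binding with the MEMTRACKER_SWAP_CHAIN_IMAGE_KEY sentinel.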
11375VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount,
11376                                                     VkImage *pSwapchainImages) {
11377    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11378    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11379
11380    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11381        // This should never happen and is checked by param checker.
11382        if (!pCount) return result;
11383        std::lock_guard<std::mutex> lock(global_lock);
11384        const size_t count = *pCount;
11385        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11386        if (swapchain_node && !swapchain_node->images.empty()) {
            // Note: memcmp against the stored vector is valid because std::vector storage is contiguous
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching image data on a subsequent call",
                        (uint64_t)(swapchain));
            }
11397            }
11398        }
        if (swapchain_node) {
            for (uint32_t i = 0; i < *pCount; ++i) {
                // Skip images already recorded by a previous call to this function
                if (std::find(swapchain_node->images.begin(), swapchain_node->images.end(), pSwapchainImages[i]) !=
                    swapchain_node->images.end())
                    continue;
                IMAGE_LAYOUT_NODE image_layout_node;
                image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
                image_layout_node.format = swapchain_node->createInfo.imageFormat;
                // Add imageMap entries for each swapchain image
                VkImageCreateInfo image_ci = {};
                image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
                image_ci.imageType = VK_IMAGE_TYPE_2D;
                image_ci.mipLevels = 1;
                image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
                image_ci.usage = swapchain_node->createInfo.imageUsage;
                image_ci.format = swapchain_node->createInfo.imageFormat;
                image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
                image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
                image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
                image_ci.extent.depth = 1;
                image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
                dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
                auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
                image_state->valid = false;
                image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
                swapchain_node->images.push_back(pSwapchainImages[i]);
                ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
                dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
                dev_data->imageLayoutMap[subpair] = image_layout_node;
                dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
            }
        }
11423    }
11424    return result;
11425}
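
// Illustrative example (not part of the layer): the two-call idiom this wrapper records state for,
// assuming a valid `device` and `swapchain`:
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);         // first call: query the count
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());   // second call: fetch the handles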
11426
11427VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11428    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11429    bool skip_call = false;
11430
11431    std::lock_guard<std::mutex> lock(global_lock);
11432    auto queue_state = getQueueState(dev_data, queue);
11433
11434    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11435        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11436        if (pSemaphore && !pSemaphore->signaled) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t &>(queue), __LINE__,
                                 DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
                                 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11441        }
11442    }
11443
11444    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11445        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11446        if (swapchain_data) {
11447            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11448                skip_call |= log_msg(
11449                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11450                    reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11451                    "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11452                    pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11453            } else {
11454                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = getImageState(dev_data, image);
                if (image_state) {
                    skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                    if (!image_state->acquired) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                            DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
                            "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
                            pPresentInfo->pImageIndices[i]);
                    }
                }
11465
11466                vector<VkImageLayout> layouts;
11467                if (FindLayouts(dev_data, image, layouts)) {
11468                    for (auto layout : layouts) {
11469                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11470                            skip_call |=
11471                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11472                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
11473                                        "Images passed to present must be in layout "
11474                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s. %s",
11475                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
11476                        }
11477                    }
11478                }
11479            }
11480
11481            // All physical devices and queue families are required to be able
11482            // to present to any native window on Android; require the
11483            // application to have established support on any other platform.
11484            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
11485                auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11486                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
11487
11488                if (support_it == surface_state->gpu_queue_support.end()) {
11489                    skip_call |=
11490                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11491                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
11492                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
11493                                "vkQueuePresentKHR: Presenting image without calling "
11494                                "vkGetPhysicalDeviceSurfaceSupportKHR");
11495                } else if (!support_it->second) {
11496                    skip_call |= log_msg(
11497                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11498                        reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961, "DS",
11499                        "vkQueuePresentKHR: Presenting image on queue that cannot "
11500                        "present to this surface. %s",
11501                        validation_error_map[VALIDATION_ERROR_01961]);
11502                }
11503            }
11504        }
11505    }
11506
11507    if (skip_call) {
11508        return VK_ERROR_VALIDATION_FAILED_EXT;
11509    }
11510
11511    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11512
11513    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11514        // Semaphore waits occur before error generation, if the call reached
11515        // the ICD. (Confirm?)
11516        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11517            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11518            if (pSemaphore) {
11519                pSemaphore->signaler.first = VK_NULL_HANDLE;
11520                pSemaphore->signaled = false;
11521            }
11522        }
11523
11524        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            // Note: this is imperfect, in that we can get confused about what
            // did or didn't succeed, but if the app does that, it's just as
            // confused itself.
11528            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11529
11530            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
11531
            // Mark the image as having been released to the WSI
            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = getImageState(dev_data, image);
                if (image_state) image_state->acquired = false;
            }
11537        }
11538
11539        // Note: even though presentation is directed to a queue, there is no
11540        // direct ordering between QP and subsequent work, so QP (and its
11541        // semaphore waits) /never/ participate in any completion proof.
11542    }
11543
11544    return result;
11545}
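
// Illustrative example (not part of the layer): a minimal acquire/submit/present sketch that satisfies
// the checks above: the wait semaphore has a pending signal, the image was acquired, and rendering
// leaves it in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. `acquireSem`, `renderSem`, `swapchain`, and `queue`
// are assumed to exist:
//     uint32_t index;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquireSem, VK_NULL_HANDLE, &index);
//     // ... submit work that waits on acquireSem, signals renderSem, and transitions the image
//     // to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR ...
//     VkPresentInfoKHR present = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &renderSem;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &index;
//     vkQueuePresentKHR(queue, &present);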
11546
11547static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
11548                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11549                                                     std::vector<SURFACE_STATE *> &surface_state,
11550                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11551    if (pCreateInfos) {
11552        std::lock_guard<std::mutex> lock(global_lock);
11553        for (uint32_t i = 0; i < swapchainCount; i++) {
11554            surface_state.push_back(getSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
11555            old_swapchain_state.push_back(getSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
            std::stringstream func_name;
            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
11558            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
11559                                                  old_swapchain_state[i])) {
11560                return true;
11561            }
11562        }
11563    }
11564    return false;
11565}
11566
11567static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
11568                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11569                                                    std::vector<SURFACE_STATE *> &surface_state,
11570                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11571    if (VK_SUCCESS == result) {
11572        for (uint32_t i = 0; i < swapchainCount; i++) {
11573            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
11574            surface_state[i]->swapchain = swapchain_state.get();
11575            dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
11576        }
11577    } else {
11578        for (uint32_t i = 0; i < swapchainCount; i++) {
11579            surface_state[i]->swapchain = nullptr;
11580        }
11581    }
    // Spec requires that even if vkCreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
11583    for (uint32_t i = 0; i < swapchainCount; i++) {
11584        if (old_swapchain_state[i]) {
11585            old_swapchain_state[i]->replaced = true;
11586        }
11587        surface_state[i]->old_swapchain = old_swapchain_state[i];
11588    }
11589    return;
11590}
11591
11592VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11593                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
11594                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11595    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11596    std::vector<SURFACE_STATE *> surface_state;
11597    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
11598
11599    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11600                                                 old_swapchain_state)) {
11601        return VK_ERROR_VALIDATION_FAILED_EXT;
11602    }
11603
11604    VkResult result =
11605        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11606
11607    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11608                                            old_swapchain_state);
11609
11610    return result;
11611}
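
// Illustrative example (not part of the layer): vkCreateSharedSwapchainsKHR (VK_KHR_display_swapchain)
// creates several swapchains in one call, and each create info is validated above by the same routine
// used for vkCreateSwapchainKHR. A minimal sketch, assuming two prepared VkSwapchainCreateInfoKHR
// structures in `infos`:
//     VkSwapchainKHR swapchains[2];
//     vkCreateSharedSwapchainsKHR(device, 2, infos, nullptr, swapchains);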
11612
11613VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11614                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11615    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11616    bool skip_call = false;
11617
11618    std::unique_lock<std::mutex> lock(global_lock);
11619
11620    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11621        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11622                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11623                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11624                             "to determine the completion of this operation.");
11625    }
11626
11627    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11628    if (pSemaphore && pSemaphore->signaled) {
11629        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11630                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
11631                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
11632                             validation_error_map[VALIDATION_ERROR_01952]);
11633    }
11634
11635    auto pFence = getFenceNode(dev_data, fence);
11636    if (pFence) {
11637        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11638    }
11639
11640    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11641
    if (swapchain_data && swapchain_data->replaced) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                             reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
                             "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
                             "present any images it has acquired, but cannot acquire any more.");
    }

    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [=](VkImage image) {
            auto state = getImageState(dev_data, image);
            return state && state->acquired;
        });
        // Stated as an addition to avoid unsigned underflow when the image count is smaller than minImageCount
        if (acquired_images + physical_device_state->surfaceCapabilities.minImageCount > swapchain_data->images.size()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                        acquired_images);
        }
    }

    if (swapchain_data && swapchain_data->images.empty()) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                             reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
                             "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
                             "vkGetSwapchainImagesKHR after swapchain creation.");
    }
11668
11669    lock.unlock();
11670
11671    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
11672
11673    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11674
11675    lock.lock();
11676    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11677        if (pFence) {
11678            pFence->state = FENCE_INFLIGHT;
11679            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
11680        }
11681
11682        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11683        if (pSemaphore) {
11684            pSemaphore->signaled = true;
11685            pSemaphore->signaler.first = VK_NULL_HANDLE;
11686        }
11687
        // Mark the image as acquired (guard against an unknown swapchain or an index we have not recorded)
        if (swapchain_data && *pImageIndex < swapchain_data->images.size()) {
            auto image = swapchain_data->images[*pImageIndex];
            auto image_state = getImageState(dev_data, image);
            if (image_state) image_state->acquired = true;
        }
11692    }
11693    lock.unlock();
11694
11695    return result;
11696}
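
// Worked example of the acquired-image bound checked above (illustrative, not part of the layer):
// with N swapchain images and a surface minImageCount of M, an application may hold at most
// N - M + 1 acquired images at once. For N = 3 and M = 2, the app may hold two acquired images;
// counting two already-acquired images when a third acquire is attempted trips the error above.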
11697
11698VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11699                                                        VkPhysicalDevice *pPhysicalDevices) {
11700    bool skip_call = false;
11701    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11702    assert(instance_data);
11703
11704    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11705    if (NULL == pPhysicalDevices) {
11706        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11707    } else {
11708        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11709            // Flag warning here. You can call this without having queried the count, but it may not be
11710            // robust on platforms with multiple physical devices.
11711            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11712                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11713                                 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11714                                 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
        }
        // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11717            // Having actual count match count from app is not a requirement, so this can be a warning
11718            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11719                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11720                                 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11721                                 "supported by this instance is %u.",
11722                                 *pPhysicalDeviceCount, instance_data->physical_devices_count);
11723        }
11724        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11725    }
11726    if (skip_call) {
11727        return VK_ERROR_VALIDATION_FAILED_EXT;
11728    }
11729    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    if (NULL == pPhysicalDevices && (result == VK_SUCCESS || result == VK_INCOMPLETE)) {
11731        instance_data->physical_devices_count = *pPhysicalDeviceCount;
11732    } else if (result == VK_SUCCESS) {  // Save physical devices
11733        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11734            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11735            phys_device_state.phys_device = pPhysicalDevices[i];
11736            // Init actual features for each physical device
11737            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11738        }
11739    }
11740    return result;
11741}
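
// Illustrative example (not part of the layer): the count-then-data idiom the warning above asks for:
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, nullptr);       // first call: query the count
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(instance, &count, gpus.data());   // second call: fetch the handles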
11742
11743// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11744static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11745                                                                 PHYSICAL_DEVICE_STATE *pd_state,
11746                                                                 uint32_t *pQueueFamilyPropertyCount, bool qfp_null,
11747                                                                 const char *count_var_name, const char *caller_name) {
11748    bool skip = false;
11749    if (qfp_null) {
11750        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11751    } else {
11752        // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to get
11753        // count
11754        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11755            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11756                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11757                            "Call sequence has %s() w/ non-NULL "
11758                            "pQueueFamilyProperties. You should first call %s() w/ "
11759                            "NULL pQueueFamilyProperties to query pCount.",
11760                            caller_name, caller_name);
11761        }
11762        // Then verify that pCount that is passed in on second call matches what was returned
11763        if (pd_state->queueFamilyPropertiesCount != *pQueueFamilyPropertyCount) {
11764            // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11765            // provide as warning
11766            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11767                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11768                            "Call to %s() w/ %s value %u, but actual count supported by this physicalDevice is %u.", caller_name,
11769                            count_var_name, *pQueueFamilyPropertyCount, pd_state->queueFamilyPropertiesCount);
11770        }
11771        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11772    }
11773    return skip;
11774}
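
// Illustrative example (not part of the layer): the call sequence the common validator above expects,
// shown for the core entry point and assuming a valid `gpu`:
//     uint32_t qf_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, nullptr);
//     std::vector<VkQueueFamilyProperties> qf_props(qf_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, qf_props.data());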
11775
11776static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11777                                                                  PHYSICAL_DEVICE_STATE *pd_state, uint32_t *pCount,
11778                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
11779    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, pCount,
11780                                                                (nullptr == pQueueFamilyProperties), "pCount",
11781                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
11782}
11783
11784static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
11785                                                                      PHYSICAL_DEVICE_STATE *pd_state,
11786                                                                      uint32_t *pQueueFamilyPropertyCount,
11787                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11788    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, pQueueFamilyPropertyCount,
11789                                                                (nullptr == pQueueFamilyProperties), "pQueueFamilyPropertyCount",
11790                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
11791}
11792
11793// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11794static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11795                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11796    if (!pQueueFamilyProperties) {
11797        pd_state->queueFamilyPropertiesCount = count;
11798    } else {  // Save queue family properties
11799        if (pd_state->queue_family_properties.size() < count) pd_state->queue_family_properties.resize(count);
11800        for (uint32_t i = 0; i < count; i++) {
11801            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
11802        }
11803    }
11804}
11805
11806static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11807                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
11808    VkQueueFamilyProperties2KHR *pqfp = nullptr;
11809    std::vector<VkQueueFamilyProperties2KHR> qfp;
    if (pQueueFamilyProperties) {
        qfp.resize(count);
11812        for (uint32_t i = 0; i < count; ++i) {
11813            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
11814            qfp[i].pNext = nullptr;
11815            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
11816        }
11817        pqfp = qfp.data();
11818    }
11819    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
11820}
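
// Note (descriptive, not part of the upstream comments): the wrapper above promotes core
// VkQueueFamilyProperties structures into VkQueueFamilyProperties2KHR so that the core and 2KHR
// entry points can share one state-update path; only the embedded queueFamilyProperties member is
// consumed downstream, so pNext stays null.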
11821
11822static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11823                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11824    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
11825}
11826
11827VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11828                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
11829    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11830    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11831    assert(physical_device_state);
11832    bool skip =
11833        PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, pCount, pQueueFamilyProperties);
11834    if (skip) {
11835        return;
11836    }
11837    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
11838    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pCount, pQueueFamilyProperties);
11839}
11840
11841VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11842                                                                      uint32_t *pQueueFamilyPropertyCount,
11843                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11844    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11845    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11846    assert(physical_device_state);
11847    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
11848                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
11849    if (skip) {
11850        return;
11851    }
11852    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11853                                                                             pQueueFamilyProperties);
11854    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
11855                                                             pQueueFamilyProperties);
11856}
11857
11858template <typename TCreateInfo, typename FPtr>
11859static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11860                              VkSurfaceKHR *pSurface, FPtr fptr) {
11861    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11862
11863    // Call down the call chain:
11864    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11865
11866    if (result == VK_SUCCESS) {
11867        std::unique_lock<std::mutex> lock(global_lock);
11868        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11869        lock.unlock();
11870    }
11871
11872    return result;
11873}
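
// Note (descriptive, not part of the upstream comments): CreateSurface is templated over the create
// info type and a pointer-to-member of the instance dispatch table;
// (instance_data->dispatch_table.*fptr)(...) invokes the downstream function through that member
// pointer. Each platform entry point below forwards its own dispatch-table member, e.g.:
//     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface,
//                          &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);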
11874
11875VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11876    bool skip_call = false;
11877    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11878    std::unique_lock<std::mutex> lock(global_lock);
11879    auto surface_state = getSurfaceState(instance_data, surface);
11880
11881    if (surface_state) {
11882        // TODO: track swapchains created from this surface.
11883        instance_data->surface_map.erase(surface);
11884    }
11885    lock.unlock();
11886
11887    if (!skip_call) {
11888        // Call down the call chain:
11889        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11890    }
11891}
11892
11893VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11894                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11895    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
11896}
11897
11898#ifdef VK_USE_PLATFORM_ANDROID_KHR
11899VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11900                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11901    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11902}
11903#endif  // VK_USE_PLATFORM_ANDROID_KHR
11904
11905#ifdef VK_USE_PLATFORM_MIR_KHR
11906VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11907                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11908    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11909}
11910#endif  // VK_USE_PLATFORM_MIR_KHR
11911
11912#ifdef VK_USE_PLATFORM_WAYLAND_KHR
11913VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11914                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11915    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11916}
11917#endif  // VK_USE_PLATFORM_WAYLAND_KHR
11918
11919#ifdef VK_USE_PLATFORM_WIN32_KHR
11920VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11921                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11922    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11923}
11924#endif  // VK_USE_PLATFORM_WIN32_KHR
11925
11926#ifdef VK_USE_PLATFORM_XCB_KHR
11927VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11928                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11929    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11930}
11931#endif  // VK_USE_PLATFORM_XCB_KHR
11932
11933#ifdef VK_USE_PLATFORM_XLIB_KHR
11934VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11935                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11936    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11937}
11938#endif  // VK_USE_PLATFORM_XLIB_KHR
11939
11940VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11941                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11942    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11943
11944    std::unique_lock<std::mutex> lock(global_lock);
11945    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11946    lock.unlock();
11947
11948    auto result =
11949        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11950
    if (result == VK_SUCCESS) {
        lock.lock();
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
        lock.unlock();
    }
11955
11956    return result;
11957}
11958
11959VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11960                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
11961    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11962    std::unique_lock<std::mutex> lock(global_lock);
11963    auto surface_state = getSurfaceState(instance_data, surface);
11964    lock.unlock();
11965
11966    auto result =
11967        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11968
    if (result == VK_SUCCESS && surface_state) {
        lock.lock();
        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
        lock.unlock();
    }
11972
11973    return result;
11974}
11975
11976VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11977                                                                       uint32_t *pPresentModeCount,
11978                                                                       VkPresentModeKHR *pPresentModes) {
11979    bool skip_call = false;
11980    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11981    std::unique_lock<std::mutex> lock(global_lock);
11982    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
11983    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11984    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11985
11986    if (pPresentModes) {
11987        // Compare the preliminary value of *pPresentModeCount with the value this time:
11988        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11989        switch (call_state) {
11990            case UNCALLED:
11991                skip_call |= log_msg(
11992                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11993                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior positive "
                    "value has been seen for pPresentModeCount.");
11996                break;
11997            default:
11998                // both query count and query details
11999                if (*pPresentModeCount != prev_mode_count) {
12000                    skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12001                                         VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12002                                         reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12003                                         "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
12004                                         "differs from the value "
12005                                         "(%u) that was returned when pPresentModes was NULL.",
12006                                         *pPresentModeCount, prev_mode_count);
12007                }
12008                break;
12009        }
12010    }
12011    lock.unlock();
12012
12013    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
12014
12015    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
12016                                                                                        pPresentModes);
12017
12018    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12019        lock.lock();
12020
12021        if (*pPresentModeCount) {
12022            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
12023            if (*pPresentModeCount > physical_device_state->present_modes.size())
12024                physical_device_state->present_modes.resize(*pPresentModeCount);
12025        }
12026        if (pPresentModes) {
12027            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
12028            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
12029                physical_device_state->present_modes[i] = pPresentModes[i];
12030            }
12031        }
12032    }
12033
12034    return result;
12035}
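
// Illustrative example (not part of the layer): querying present modes with the two-call idiom and
// falling back to FIFO, which the specification guarantees is always supported:
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());
//     VkPresentModeKHR chosen = VK_PRESENT_MODE_FIFO_KHR;  // always available
//     for (auto m : modes)
//         if (m == VK_PRESENT_MODE_MAILBOX_KHR) chosen = m;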
12036
12037VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12038                                                                  uint32_t *pSurfaceFormatCount,
12039                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
12040    bool skip_call = false;
12041    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12042    std::unique_lock<std::mutex> lock(global_lock);
12043    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12044    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
12045
12046    if (pSurfaceFormats) {
12047        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
12048
12049        switch (call_state) {
            case UNCALLED:
                // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely
                // didn't previously call this function with a NULL value of pSurfaceFormats:
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior positive "
                    "value has been seen for pSurfaceFormatCount.");
                break;
            default:
                if (prev_format_count != *pSurfaceFormatCount) {
                    skip_call |= log_msg(
                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, reinterpret_cast<uint64_t>(physicalDevice), __LINE__,
                        DEVLIMITS_COUNT_MISMATCH, "DL",
                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with *pSurfaceFormatCount (%u) that differs from "
                        "the value (%u) that was returned when pSurfaceFormats was NULL.",
                        *pSurfaceFormatCount, prev_format_count);
                }
                break;
12073        }
12074    }
12075    lock.unlock();
12076
12077    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
12078
12079    // Call down the call chain:
12080    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
12081                                                                                   pSurfaceFormats);
12082
12083    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12084        lock.lock();
12085
12086        if (*pSurfaceFormatCount) {
12087            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
12088            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
12089                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
12090        }
12091        if (pSurfaceFormats) {
12092            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
12093            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
12094                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
12095            }
12096        }
12097    }
12098    return result;
12099}
12100
12101VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
12102                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
12103                                                            const VkAllocationCallbacks *pAllocator,
12104                                                            VkDebugReportCallbackEXT *pMsgCallback) {
12105    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12106    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
12107    if (VK_SUCCESS == res) {
12108        std::lock_guard<std::mutex> lock(global_lock);
12109        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
12110    }
12111    return res;
12112}
12113
12114VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
12115                                                         const VkAllocationCallbacks *pAllocator) {
12116    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12117    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
12118    std::lock_guard<std::mutex> lock(global_lock);
12119    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
12120}
12121
12122VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
12123                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
12124                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
12125    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12126    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
12127}
12128
12129VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
12130    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12131}
12132
12133VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12134                                                              VkLayerProperties *pProperties) {
12135    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12136}
12137
12138VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
12139                                                                    VkExtensionProperties *pProperties) {
12140    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
12141        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
12142
12143    return VK_ERROR_LAYER_NOT_PRESENT;
12144}
12145
12146VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
12147                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
12148    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);
12149
12150    assert(physicalDevice);
12151
12152    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12153    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
12154}
12155
12156static PFN_vkVoidFunction intercept_core_instance_command(const char *name);
12157
12158static PFN_vkVoidFunction intercept_core_device_command(const char *name);
12159
12160static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev);
12161
12162static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance);
12163
12164static PFN_vkVoidFunction intercept_extension_instance_commands(const char *name, VkInstance instance);
12165
12166VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
12167    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
12168    if (proc) return proc;
12169
12170    assert(dev);
12171
12172    proc = intercept_khr_swapchain_command(funcName, dev);
12173    if (proc) return proc;
12174
12175    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
12176
12177    auto &table = dev_data->dispatch_table;
12178    if (!table.GetDeviceProcAddr) return nullptr;
12179    return table.GetDeviceProcAddr(dev, funcName);
12180}
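
// Note (descriptive, not part of the upstream comments): device-level lookups resolve in intercept
// order: core device commands first, then KHR swapchain commands, and finally pass-through to the
// next layer's GetDeviceProcAddr.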
12181
12182VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
12183    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
12184    if (!proc) proc = intercept_core_device_command(funcName);
12185    if (!proc) proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
12186    if (!proc) proc = intercept_khr_surface_command(funcName, instance);
12187    if (proc) return proc;
12188
12189    assert(instance);
12190
12191    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12192    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
12193    if (proc) return proc;
12194
12195    proc = intercept_extension_instance_commands(funcName, instance);
12196    if (proc) return proc;
12197
12198    auto &table = instance_data->dispatch_table;
12199    if (!table.GetInstanceProcAddr) return nullptr;
12200    return table.GetInstanceProcAddr(instance, funcName);
12201}
12202
12203VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
12204    assert(instance);
12205
12206    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12207
12208    auto &table = instance_data->dispatch_table;
12209    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
12210    return table.GetPhysicalDeviceProcAddr(instance, funcName);
12211}
12212
12213static PFN_vkVoidFunction intercept_core_instance_command(const char *name) {
12214    static const struct {
12215        const char *name;
12216        PFN_vkVoidFunction proc;
12217    } core_instance_commands[] = {
12218        {"vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr)},
12219        {"vk_layerGetPhysicalDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceProcAddr)},
12220        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
12221        {"vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance)},
12222        {"vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice)},
12223        {"vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices)},
12224        {"vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties)},
12225        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
12226        {"vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties)},
12227        {"vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties)},
12228        {"vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties)},
12229        {"vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties)},
12230    };
12231
12232    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
12233        if (!strcmp(core_instance_commands[i].name, name)) return core_instance_commands[i].proc;
12234    }
12235
12236    return nullptr;
12237}
12238
12239static PFN_vkVoidFunction intercept_core_device_command(const char *name) {
12240    static const struct {
12241        const char *name;
12242        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

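    // Linear scan of the table above: return the layer's intercept for a known
    // core device-level command, or nullptr so the caller can fall through to
    // extension lookup and ultimately the next layer in the chain.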
    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name)) return core_device_commands[i].proc;
    }

    return nullptr;
}

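// Resolve VK_KHR_swapchain entry points. These are only handed out when the
// swapchain (WSI) extension was actually enabled on the device, and
// vkCreateSharedSwapchainsKHR additionally requires the display-swapchain
// extension to have been enabled.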
static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        {"vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR)},
        {"vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR)},
        {"vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR)},
        {"vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR)},
        {"vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR)},
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled) return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name)) return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled) return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name)) return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

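// Resolve VK_KHR_surface and platform-surface entry points. Each table entry
// pairs a command name with a pointer-to-member flag on instance_layer_data;
// the command is only handed out when the matching instance extension
// (VK_KHR_surface, VK_KHR_display, or a platform-specific VK_KHR_*_surface)
// was enabled at instance creation.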
static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
         &instance_layer_data::androidSurfaceExtensionEnabled},
#endif  // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
         &instance_layer_data::mirSurfaceExtensionEnabled},
#endif  // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
         &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif  // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
         &instance_layer_data::win32SurfaceExtensionEnabled},
#endif  // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
         &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif  // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
         &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif  // VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
         &instance_layer_data::displayExtensionEnabled},
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
         &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable))) return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}

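// A minimal sketch of the pointer-to-data-member gating used above, with
// hypothetical names (Flags, Entry, wsi) purely for illustration:
//
//     struct Flags { bool wsi = false; };
//     struct Entry { const char *name; bool Flags::*enable; };
//     static const Entry table[] = {{"vkDestroySurfaceKHR", &Flags::wsi}};
//     Flags f;
//     bool exposed = f.*(table[0].enable);  // false until f.wsi is set
//
// Reading through object.*member_pointer is what lets a single static table
// drive per-instance extension checks without a switch statement.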
static PFN_vkVoidFunction intercept_extension_instance_commands(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } instance_extension_commands[] = {
        {"vkGetPhysicalDeviceQueueFamilyProperties2KHR",
         reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties2KHR)},
    };

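    // Note: the enable member is left value-initialized (nullptr) for the
    // entry above and is never consulted below, so this command is exposed
    // unconditionally rather than being gated on its extension.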
    for (size_t i = 0; i < ARRAY_SIZE(instance_extension_commands); i++) {
        if (!strcmp(instance_extension_commands[i].name, name)) {
            return instance_extension_commands[i].proc;
        }
    }
    return nullptr;
}

}  // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(VkInstance instance,
                                                              const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator,
                                                              VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                           const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                   VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                   int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0: these exported entry points are thin wrappers, since this library contains only a single layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

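// The two device-scope enumerations below are expected to be reached only with
// physicalDevice == VK_NULL_HANDLE (the loader's layer-enumeration path); the
// asserts encode that assumption, and the wrapped layer commands handle
// VK_NULL_HANDLE internally.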
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}

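// Interface-version negotiation with the loader. Both sides settle on
// min(loader version, layer version): if the loader reports an older version,
// it is remembered in loader_layer_if_version; if it reports a newer one, the
// value in the struct is clamped down to CURRENT_LOADER_LAYER_INTERFACE_VERSION
// so the loader talks to this layer at a version it supports. For example, a
// loader reporting version 1 is recorded as 1, while one reporting a version
// above ours is clamped to ours.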
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if the negotiated interface version is new enough for the structure to contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}