core_validation.cpp revision dfe4c891638705248986e38c796425b89cdea8ff
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
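
// Illustrative usage (a sketch; the message text is hypothetical):
//     LOGCONSOLE("core_validation: layer initialized");
// Note the non-Android expansion is a bare block, so LOGCONSOLE(...) followed by ';' inside
// an un-braced if/else is a syntax hazard; a do { } while (0) wrapper would avoid that.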

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    bool nv_glsl_shader_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) {  // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() {  // ++x;
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
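
// Illustrative sketch (not part of the layer, guarded out of the build): walking a module's
// instructions with the iterator above. Assumes 'words' holds a complete SPIR-V binary whose
// 5-word header has already been validated.
#if 0
static unsigned count_entry_points(std::vector<uint32_t> const &words) {
    spirv_inst_iter it(words.begin(), words.begin() + 5);  // First instruction follows the header
    spirv_inst_iter end_it(words.begin(), words.end());    // One past the last instruction
    unsigned count = 0;
    for (; it != end_it; ++it) {
        if (it.opcode() == spv::OpEntryPoint) ++count;  // Word 0 packs length (high) and opcode (low)
    }
    return count;
}
#endif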

struct shader_module {
    // The spirv image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. This is useful because walking type
    // trees, constant expressions, etc. requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;
    bool has_valid_spirv;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index(),
          has_valid_spirv(true) {
        build_def_index(this);
    }

    shader_module() : has_valid_spirv(false) {}

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); }  // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }          // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
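
// Illustrative sketch (guarded out of the build): range-based iteration plus def lookup.
// OpVariable's word(1) is its result-type <id>; word(2) is its result <id>.
#if 0
static void find_first_variable_type(shader_module const &module) {
    for (auto insn : module) {  // Uses begin()/end() above
        if (insn.opcode() == spv::OpVariable) {
            auto type = module.get_def(insn.word(1));  // Jump to the type's definition
            assert(type != module.end());              // All type defs are in def_index
            (void)type;
            break;
        }
    }
}
#endif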

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

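// Illustrative usage of the lookup helpers above (a sketch, guarded out of the build;
// 'dev_data' and 'image' are assumed to be in scope). Callers hold global_lock and must
// tolerate a null return for stale or never-created handles:
#if 0
{
    std::lock_guard<std::mutex> lock(global_lock);
    IMAGE_STATE *image_state = getImageState(dev_data, image);
    if (!image_state) {
        // Handle is unknown to the layer; report the error and skip the call.
    }
}
#endif
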
// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
            return getImageState(dev_data, VkImage(handle));
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
            return getBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *dev_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                                MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation.",
                                ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = (msgCode == -1) ? "" : validation_error_map[msgCode];
            skip_call = log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                "Invalid usage flag for %s 0x%" PRIxLEAST64 " used by %s. In this case, %s should have %s set during creation. %s",
                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                             int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

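// Illustrative call (a sketch, guarded out of the build; the error code and call site are
// placeholders): with strict == true every desired bit must be present in the buffer's
// usage; with strict == false any overlap passes.
#if 0
skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                 VALIDATION_ERROR_01234 /* placeholder code */, "vkCmdCopyBuffer()",
                                 "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
#endif
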
// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
            return "image";
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
            return "buffer";
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
            return "image view";
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
            return "buffer view";
        case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
            return "swapchain";
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
            return "descriptor set";
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
            return "framebuffer";
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
            return "event";
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
            return "query pool";
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
            return "descriptor pool";
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
            return "command pool";
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
            return "pipeline";
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
            return "sampler";
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
            return "renderpass";
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
            return "device memory";
        case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
            return "semaphore";
        default:
            return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
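
// Illustrative flow (a sketch, guarded out of the build): a transfer that writes a buffer
// marks its bound range valid, so a subsequent read of that range passes
// ValidateBufferMemoryIsValid without a warning.
#if 0
SetBufferMemoryValid(dev_data, dst_buffer_state, true);  // Destination written by a copy/fill
skip |= ValidateBufferMemoryIsValid(dev_data, dst_buffer_state, "vkCmdCopyBuffer()");
#endif
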
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

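// Illustrative sketch (guarded out of the build) of the two-way linkage the
// AddCommandBufferBinding* helpers maintain: the command buffer records which objects and
// memory it references, and each state object records which command buffers reference it,
// so destroying either side can invalidate the other.
#if 0
AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
assert(buffer_state->cb_bindings.count(cb_node) == 1);  // state object -> command buffer
#endif
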
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
    }
    return result;
}

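// Illustrative usage (a sketch, guarded out of the build; the call site and error code are
// placeholders): non-sparse resources must have memory bound before being used in a command.
#if 0
skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()",
                                      VALIDATION_ERROR_01234 /* placeholder code */);
#endif
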
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                // TODO: VALIDATION_ERROR_00791 and VALIDATION_ERROR_00803
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}

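// Illustrative sequence (a sketch, guarded out of the build; handles are hypothetical): a
// second bind to the same non-sparse image takes the MEMTRACK_REBIND_OBJECT path above,
// since such bindings are immutable in Vulkan.
#if 0
SetMemBinding(dev_data, mem_a, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory()");
SetMemBinding(dev_data, mem_b, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory()");  // error logged
#endif
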
// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skip value: true if an error was logged, false otherwise
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
        case CMD_BINDPIPELINE:
            return "CMD_BINDPIPELINE";
        case CMD_BINDPIPELINEDELTA:
            return "CMD_BINDPIPELINEDELTA";
        case CMD_SETVIEWPORTSTATE:
            return "CMD_SETVIEWPORTSTATE";
        case CMD_SETLINEWIDTHSTATE:
            return "CMD_SETLINEWIDTHSTATE";
        case CMD_SETDEPTHBIASSTATE:
            return "CMD_SETDEPTHBIASSTATE";
        case CMD_SETBLENDSTATE:
            return "CMD_SETBLENDSTATE";
        case CMD_SETDEPTHBOUNDSSTATE:
            return "CMD_SETDEPTHBOUNDSSTATE";
        case CMD_SETSTENCILREADMASKSTATE:
            return "CMD_SETSTENCILREADMASKSTATE";
        case CMD_SETSTENCILWRITEMASKSTATE:
            return "CMD_SETSTENCILWRITEMASKSTATE";
        case CMD_SETSTENCILREFERENCESTATE:
            return "CMD_SETSTENCILREFERENCESTATE";
        case CMD_BINDDESCRIPTORSETS:
            return "CMD_BINDDESCRIPTORSETS";
        case CMD_BINDINDEXBUFFER:
            return "CMD_BINDINDEXBUFFER";
        case CMD_BINDVERTEXBUFFER:
            return "CMD_BINDVERTEXBUFFER";
        case CMD_DRAW:
            return "CMD_DRAW";
        case CMD_DRAWINDEXED:
            return "CMD_DRAWINDEXED";
        case CMD_DRAWINDIRECT:
            return "CMD_DRAWINDIRECT";
        case CMD_DRAWINDEXEDINDIRECT:
            return "CMD_DRAWINDEXEDINDIRECT";
        case CMD_DISPATCH:
            return "CMD_DISPATCH";
        case CMD_DISPATCHINDIRECT:
            return "CMD_DISPATCHINDIRECT";
        case CMD_COPYBUFFER:
            return "CMD_COPYBUFFER";
        case CMD_COPYIMAGE:
            return "CMD_COPYIMAGE";
        case CMD_BLITIMAGE:
            return "CMD_BLITIMAGE";
        case CMD_COPYBUFFERTOIMAGE:
            return "CMD_COPYBUFFERTOIMAGE";
        case CMD_COPYIMAGETOBUFFER:
            return "CMD_COPYIMAGETOBUFFER";
        case CMD_CLONEIMAGEDATA:
            return "CMD_CLONEIMAGEDATA";
        case CMD_UPDATEBUFFER:
            return "CMD_UPDATEBUFFER";
        case CMD_FILLBUFFER:
            return "CMD_FILLBUFFER";
        case CMD_CLEARCOLORIMAGE:
            return "CMD_CLEARCOLORIMAGE";
        case CMD_CLEARATTACHMENTS:
            return "CMD_CLEARATTACHMENTS";
        case CMD_CLEARDEPTHSTENCILIMAGE:
            return "CMD_CLEARDEPTHSTENCILIMAGE";
        case CMD_RESOLVEIMAGE:
            return "CMD_RESOLVEIMAGE";
        case CMD_SETEVENT:
            return "CMD_SETEVENT";
        case CMD_RESETEVENT:
            return "CMD_RESETEVENT";
        case CMD_WAITEVENTS:
            return "CMD_WAITEVENTS";
        case CMD_PIPELINEBARRIER:
            return "CMD_PIPELINEBARRIER";
        case CMD_BEGINQUERY:
            return "CMD_BEGINQUERY";
        case CMD_ENDQUERY:
            return "CMD_ENDQUERY";
        case CMD_RESETQUERYPOOL:
            return "CMD_RESETQUERYPOOL";
        case CMD_COPYQUERYPOOLRESULTS:
            return "CMD_COPYQUERYPOOLRESULTS";
        case CMD_WRITETIMESTAMP:
            return "CMD_WRITETIMESTAMP";
        case CMD_INITATOMICCOUNTERS:
            return "CMD_INITATOMICCOUNTERS";
        case CMD_LOADATOMICCOUNTERS:
            return "CMD_LOADATOMICCOUNTERS";
        case CMD_SAVEATOMICCOUNTERS:
            return "CMD_SAVEATOMICCOUNTERS";
        case CMD_BEGINRENDERPASS:
            return "CMD_BEGINRENDERPASS";
        case CMD_ENDRENDERPASS:
            return "CMD_ENDRENDERPASS";
        default:
            return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
            // Types
            case spv::OpTypeVoid:
            case spv::OpTypeBool:
            case spv::OpTypeInt:
            case spv::OpTypeFloat:
            case spv::OpTypeVector:
            case spv::OpTypeMatrix:
            case spv::OpTypeImage:
            case spv::OpTypeSampler:
            case spv::OpTypeSampledImage:
            case spv::OpTypeArray:
            case spv::OpTypeRuntimeArray:
            case spv::OpTypeStruct:
            case spv::OpTypeOpaque:
            case spv::OpTypePointer:
            case spv::OpTypeFunction:
            case spv::OpTypeEvent:
            case spv::OpTypeDeviceEvent:
            case spv::OpTypeReserveId:
            case spv::OpTypeQueue:
            case spv::OpTypePipe:
                module->def_index[insn.word(1)] = insn.offset();
                break;

            // Fixed constants
            case spv::OpConstantTrue:
            case spv::OpConstantFalse:
            case spv::OpConstant:
            case spv::OpConstantComposite:
            case spv::OpConstantSampler:
            case spv::OpConstantNull:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Specialization constants
            case spv::OpSpecConstantTrue:
            case spv::OpSpecConstantFalse:
            case spv::OpSpecConstant:
            case spv::OpSpecConstantComposite:
            case spv::OpSpecConstantOp:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Variables
            case spv::OpVariable:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            // Functions
            case spv::OpFunction:
                module->def_index[insn.word(2)] = insn.offset();
                break;

            default:
                // We don't care about any other defs for now.
                break;
        }
    }
}

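// Note on the word positions used above: OpType* instructions carry their result <id> in
// word 1, while constants, variables, and functions carry a result-*type* <id> in word 1
// and their result <id> in word 2, matching the SPIR-V binary layout of each instruction.
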
static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
        case spv::StorageClassInput:
            return "input";
        case spv::StorageClassOutput:
            return "output";
        case spv::StorageClassUniformConstant:
            return "const uniform";
        case spv::StorageClassUniform:
            return "uniform";
        case spv::StorageClassWorkgroup:
            return "workgroup local";
        case spv::StorageClassCrossWorkgroup:
            return "workgroup global";
        case spv::StorageClassPrivate:
            return "private global";
        case spv::StorageClassFunction:
            return "function";
        case spv::StorageClassGeneric:
            return "generic";
        case spv::StorageClassAtomicCounter:
            return "atomic counter";
        case spv::StorageClassImage:
            return "image";
        case spv::StorageClassPushConstant:
            return "push constant";
        default:
            return "unknown";
    }
}

// Get the value of an integral constant
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        // TODO: Either ensure that the specialization transform is already performed on a module we're
        //       considering here, OR -- specialize on the fly now.
        return 1;
    }

    return value.word(3);
}
static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypeBool:
            ss << "bool";
            break;
        case spv::OpTypeInt:
            ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
            break;
        case spv::OpTypeFloat:
            ss << "float" << insn.word(2);
            break;
        case spv::OpTypeVector:
            ss << "vec" << insn.word(3) << " of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeMatrix:
            ss << "mat" << insn.word(3) << " of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeArray:
            ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypePointer:
            ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
            describe_type_inner(ss, src, insn.word(3));
            break;
        case spv::OpTypeStruct: {
            ss << "struct of (";
            for (unsigned i = 2; i < insn.len(); i++) {
                describe_type_inner(ss, src, insn.word(i));
                if (i == insn.len() - 1) {
                    ss << ")";
                } else {
                    ss << ", ";
                }
            }
            break;
        }
        case spv::OpTypeSampler:
            ss << "sampler";
            break;
        case spv::OpTypeSampledImage:
            ss << "sampler+";
            describe_type_inner(ss, src, insn.word(2));
            break;
        case spv::OpTypeImage:
            ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
            break;
        default:
            ss << "oddtype";
            break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}

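// Illustrative output (a sketch): for a pointer to a uniform struct holding a vec4 and a
// float, describe_type produces something like
//     "ptr to uniform struct of (vec4 of float32, float32)"
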
static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    // Walk two type trees together, and complain about differences
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. storage class is expected to differ
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
        return false;
    }

    switch (a_insn.opcode()) {
        case spv::OpTypeBool:
            return true;
        case spv::OpTypeInt:
            // Match on width, signedness
            return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
        case spv::OpTypeFloat:
            // Match on width
            return a_insn.word(2) == b_insn.word(2);
        case spv::OpTypeVector:
            // Match on element type, count.
            if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false)) return false;
            if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
                return a_insn.word(3) >= b_insn.word(3);
            } else {
                return a_insn.word(3) == b_insn.word(3);
            }
        case spv::OpTypeMatrix:
            // Match on element type, count.
            return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
                   a_insn.word(3) == b_insn.word(3);
        case spv::OpTypeArray:
            // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
            // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
            return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
                   get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
        case spv::OpTypeStruct:
            // Match on all element types
            {
                if (a_insn.len() != b_insn.len()) {
                    return false;  // Structs cannot match if member counts differ
                }

                for (unsigned i = 2; i < a_insn.len(); i++) {
                    if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                        return false;
                    }
                }

                return true;
            }
        default:
            // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
            return false;
    }
}

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
        case spv::OpTypePointer:
1247            // See through the ptr -- this is only ever at the top level for graphics shaders; we're never actually passing
1248            // pointers around.
1249            return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1250        case spv::OpTypeArray:
1251            if (strip_array_level) {
1252                return get_locations_consumed_by_type(src, insn.word(2), false);
1253            } else {
1254                return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1255            }
1256        case spv::OpTypeMatrix:
1257            // Num locations is the dimension * element size
1258            return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1259        case spv::OpTypeVector: {
1260            auto scalar_type = src->get_def(insn.word(2));
1261            auto bit_width =
1262                (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ? scalar_type.word(2) : 32;
1263
1264            // Locations are 128 bits wide; 3- and 4-component vectors of 64-bit types require two.
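            // E.g. a 32-bit vec4 is (32*4 + 127)/128 = 1 location, while a 64-bit dvec3 is (64*3 + 127)/128 = 2 locations.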
1265            return (bit_width * insn.word(3) + 127) / 128;
1266        }
1267        default:
1268            // Everything else is just 1.
1269            // TODO: extend to handle 64-bit scalar types, whose vectors may need multiple locations.
1270            return 1;
1271
1272    }
1273}
1274
1275static unsigned get_locations_consumed_by_format(VkFormat format) {
1276    switch (format) {
1277        case VK_FORMAT_R64G64B64A64_SFLOAT:
1278        case VK_FORMAT_R64G64B64A64_SINT:
1279        case VK_FORMAT_R64G64B64A64_UINT:
1280        case VK_FORMAT_R64G64B64_SFLOAT:
1281        case VK_FORMAT_R64G64B64_SINT:
1282        case VK_FORMAT_R64G64B64_UINT:
1283            return 2;
1284        default:
1285            return 1;
1286    }
1287}
1288
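// Keys for the interface maps below: location_t is (location, component); descriptor_slot_t is (descriptor set, binding).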
1289typedef std::pair<unsigned, unsigned> location_t;
1290typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1291
1292struct interface_var {
1293    uint32_t id;
1294    uint32_t type_id;
1295    uint32_t offset;
1296    bool is_patch;
1297    bool is_block_member;
1298    bool is_relaxed_precision;
1299    // TODO: collect the name, too? Isn't required to be present.
1300};
1301
1302struct shader_stage_attributes {
1303    char const *const name;
1304    bool arrayed_input;
1305    bool arrayed_output;
1306};
1307
1308static shader_stage_attributes shader_stage_attribs[] = {
1309    {"vertex shader", false, false},  {"tessellation control shader", true, true}, {"tessellation evaluation shader", true, false},
1310    {"geometry shader", true, false}, {"fragment shader", false, false},
1311};
1312
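// Peel OpTypePointer and (when is_array_of_verts) one per-vertex array level off a type until an OpTypeStruct is reached;
// returns src->end() if the type never resolves to a struct. E.g. a geometry shader's arrayed input block is declared through
// a pointer to an array of the block's struct type.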
1313static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1314    while (true) {
1315        if (def.opcode() == spv::OpTypePointer) {
1316            def = src->get_def(def.word(3));
1317        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1318            def = src->get_def(def.word(2));
1319            is_array_of_verts = false;
1320        } else if (def.opcode() == spv::OpTypeStruct) {
1321            return def;
1322        } else {
1323            return src->end();
1324        }
1325    }
1326}
1327
1328static void collect_interface_block_members(shader_module const *src, std::map<location_t, interface_var> *out,
1329                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1330                                            uint32_t id, uint32_t type_id, bool is_patch) {
1331    // Walk down the type_id presented, trying to determine whether it's actually an interface block.
1332    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1333    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1334        // This isn't an interface block.
1335        return;
1336    }
1337
1338    std::unordered_map<unsigned, unsigned> member_components;
1339    std::unordered_map<unsigned, unsigned> member_relaxed_precision;
1340
1341    // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
1342    for (auto insn : *src) {
1343        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1344            unsigned member_index = insn.word(2);
1345
1346            if (insn.word(3) == spv::DecorationComponent) {
1347                unsigned component = insn.word(4);
1348                member_components[member_index] = component;
1349            }
1350
1351            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
1352                member_relaxed_precision[member_index] = 1;
1353            }
1354        }
1355    }
1356
1357    // Second pass -- produce the output, from Location decorations
1358    for (auto insn : *src) {
1359        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1360            unsigned member_index = insn.word(2);
1361            unsigned member_type_id = type.word(2 + member_index);
1362
1363            if (insn.word(3) == spv::DecorationLocation) {
1364                unsigned location = insn.word(4);
1365                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1366                auto component_it = member_components.find(member_index);
1367                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1368                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();
1369
1370                for (unsigned int offset = 0; offset < num_locations; offset++) {
1371                    interface_var v = {};
1372                    v.id = id;
1373                    // TODO: member index in interface_var too?
1374                    v.type_id = member_type_id;
1375                    v.offset = offset;
1376                    v.is_patch = is_patch;
1377                    v.is_block_member = true;
1378                    v.is_relaxed_precision = is_relaxed_precision;
1379                    (*out)[std::make_pair(location + offset, component)] = v;
1380                }
1381            }
1382        }
1383    }
1384}
1385
1386static std::map<location_t, interface_var> collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1387                                                                         spv::StorageClass sinterface, bool is_array_of_verts) {
1388    std::unordered_map<unsigned, unsigned> var_locations;
1389    std::unordered_map<unsigned, unsigned> var_builtins;
1390    std::unordered_map<unsigned, unsigned> var_components;
1391    std::unordered_map<unsigned, unsigned> blocks;
1392    std::unordered_map<unsigned, unsigned> var_patch;
1393    std::unordered_map<unsigned, unsigned> var_relaxed_precision;
1394
1395    for (auto insn : *src) {
1396        // We consider two interface models: SSO (separate shader object) rendezvous-by-location, and builtins. Complain
1397        // about anything that fits neither model.
1398        if (insn.opcode() == spv::OpDecorate) {
1399            if (insn.word(2) == spv::DecorationLocation) {
1400                var_locations[insn.word(1)] = insn.word(3);
1401            }
1402
1403            if (insn.word(2) == spv::DecorationBuiltIn) {
1404                var_builtins[insn.word(1)] = insn.word(3);
1405            }
1406
1407            if (insn.word(2) == spv::DecorationComponent) {
1408                var_components[insn.word(1)] = insn.word(3);
1409            }
1410
1411            if (insn.word(2) == spv::DecorationBlock) {
1412                blocks[insn.word(1)] = 1;
1413            }
1414
1415            if (insn.word(2) == spv::DecorationPatch) {
1416                var_patch[insn.word(1)] = 1;
1417            }
1418
1419            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
1420                var_relaxed_precision[insn.word(1)] = 1;
1421            }
1422        }
1423    }
1424
1425    // TODO: handle grouped decorations
1426    // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
1427
1428    // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
1429    // rest of the word -- so we only need to look at the last byte in the word to determine which word contains the terminator.
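    // E.g. the name "main" plus its null terminator occupies words 3 and 4, so the interface ids would begin at word 5.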
1430    uint32_t word = 3;
1431    while (entrypoint.word(word) & 0xff000000u) {
1432        ++word;
1433    }
1434    ++word;
1435
1436    std::map<location_t, interface_var> out;
1437
1438    for (; word < entrypoint.len(); word++) {
1439        auto insn = src->get_def(entrypoint.word(word));
1440        assert(insn != src->end());
1441        assert(insn.opcode() == spv::OpVariable);
1442
1443        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1444            unsigned id = insn.word(2);
1445            unsigned type = insn.word(1);
1446
1447            int location = value_or_default(var_locations, id, -1);
1448            int builtin = value_or_default(var_builtins, id, -1);
1449            unsigned component = value_or_default(var_components, id, 0);  // Unspecified is OK; defaults to 0
1450            bool is_patch = var_patch.find(id) != var_patch.end();
1451            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
1452
1453            // All variables and interface block members in the Input or Output storage classes must be decorated with either
1454            // a builtin or an explicit location.
1455            //
1456            // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
1457            // this path for the interface block case, as the individual members of the type are decorated, rather than
1458            // variable declarations.
1459
1460            if (location != -1) {
1461                // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
1462                // one result for each.
1463                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1464                for (unsigned int offset = 0; offset < num_locations; offset++) {
1465                    interface_var v = {};
1466                    v.id = id;
1467                    v.type_id = type;
1468                    v.offset = offset;
1469                    v.is_patch = is_patch;
1470                    v.is_relaxed_precision = is_relaxed_precision;
1471                    out[std::make_pair(location + offset, component)] = v;
1472                }
1473            } else if (builtin == -1) {
1474                // An interface block instance
1475                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1476            }
1477        }
1478    }
1479
1480    return out;
1481}
1482
1483static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1484    debug_report_data *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
1485    std::vector<std::pair<uint32_t, interface_var>> out;
1486
1487    for (auto insn : *src) {
1488        if (insn.opcode() == spv::OpDecorate) {
1489            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1490                auto attachment_index = insn.word(3);
1491                auto id = insn.word(1);
1492
1493                if (accessible_ids.count(id)) {
1494                    auto def = src->get_def(id);
1495                    assert(def != src->end());
1496
1497                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1498                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1499                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1500                            interface_var v = {};
1501                            v.id = id;
1502                            v.type_id = def.word(1);
1503                            v.offset = offset;
1504                            out.emplace_back(attachment_index + offset, v);
1505                        }
1506                    }
1507                }
1508            }
1509        }
1510    }
1511
1512    return out;
1513}
1514
1515static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1516    debug_report_data *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
1517    std::unordered_map<unsigned, unsigned> var_sets;
1518    std::unordered_map<unsigned, unsigned> var_bindings;
1519
1520    for (auto insn : *src) {
1521        // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1522        // DecorationDescriptorSet and DecorationBinding.
1523        if (insn.opcode() == spv::OpDecorate) {
1524            if (insn.word(2) == spv::DecorationDescriptorSet) {
1525                var_sets[insn.word(1)] = insn.word(3);
1526            }
1527
1528            if (insn.word(2) == spv::DecorationBinding) {
1529                var_bindings[insn.word(1)] = insn.word(3);
1530            }
1531        }
1532    }
1533
1534    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1535
1536    for (auto id : accessible_ids) {
1537        auto insn = src->get_def(id);
1538        assert(insn != src->end());
1539
1540        if (insn.opcode() == spv::OpVariable &&
1541            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1542            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1543            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1544
1545            interface_var v = {};
1546            v.id = insn.word(2);
1547            v.type_id = insn.word(1);
1548            out.emplace_back(std::make_pair(set, binding), v);
1549        }
1550    }
1551
1552    return out;
1553}
1554
1555static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1556                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1557                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1558                                              shader_stage_attributes const *consumer_stage) {
1559    bool pass = true;
1560
1561    auto outputs =
1562        collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1563    auto inputs =
1564        collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1565
1566    auto a_it = outputs.begin();
1567    auto b_it = inputs.begin();
1568
1569    // Maps sorted by key (location); walk them together to find mismatches
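    // Two-iterator merge walk: an output location with no matching input is "not consumed" (performance warning), an input
    // location with no matching output is "not produced" (error), and matching locations get type/decoration checks below.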
1570    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1571        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1572        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1573        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1574        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1575
1576        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1577            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1578                        SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", "%s writes to output location %u.%u which is not consumed by %s",
1579                        producer_stage->name, a_first.first, a_first.second, consumer_stage->name)) {
1580                pass = false;
1581            }
1582            a_it++;
1583        } else if (a_at_end || a_first > b_first) {
1584            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1585                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "%s consumes input location %u.%u which is not written by %s",
1586                        consumer_stage->name, b_first.first, b_first.second, producer_stage->name)) {
1587                pass = false;
1588            }
1589            b_it++;
1590        } else {
1591            // Subtleties of arrayed interfaces:
1592            // - if is_patch, then the member is not arrayed, even though the interface may be.
1593            // - if is_block_member, then the extra array level of an arrayed interface is not
1594            //   expressed in the member type -- it's expressed in the block type.
1595            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1596                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1597                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, true)) {
1598                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1599                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1600                            a_first.first, a_first.second, describe_type(producer, a_it->second.type_id).c_str(),
1601                            describe_type(consumer, b_it->second.type_id).c_str())) {
1602                    pass = false;
1603                }
1604            }
1605            if (a_it->second.is_patch != b_it->second.is_patch) {
1606                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1607                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1608                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1609                            "per-%s in %s stage",
1610                            a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1611                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1612                    pass = false;
1613                }
1614            }
1615            if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
1616                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1617                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1618                            "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", a_first.first,
1619                            a_first.second, producer_stage->name, consumer_stage->name)) {
1620                    pass = false;
1621                }
1622            }
1623            a_it++;
1624            b_it++;
1625        }
1626    }
1627
1628    return pass;
1629}
1630
1631enum FORMAT_TYPE {
1632    FORMAT_TYPE_UNDEFINED,
1633    FORMAT_TYPE_FLOAT,  // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
1634    FORMAT_TYPE_SINT,
1635    FORMAT_TYPE_UINT,
1636};
1637
1638static unsigned get_format_type(VkFormat fmt) {
1639    switch (fmt) {
1640        case VK_FORMAT_UNDEFINED:
1641            return FORMAT_TYPE_UNDEFINED;
1642        case VK_FORMAT_R8_SINT:
1643        case VK_FORMAT_R8G8_SINT:
1644        case VK_FORMAT_R8G8B8_SINT:
1645        case VK_FORMAT_R8G8B8A8_SINT:
1646        case VK_FORMAT_R16_SINT:
1647        case VK_FORMAT_R16G16_SINT:
1648        case VK_FORMAT_R16G16B16_SINT:
1649        case VK_FORMAT_R16G16B16A16_SINT:
1650        case VK_FORMAT_R32_SINT:
1651        case VK_FORMAT_R32G32_SINT:
1652        case VK_FORMAT_R32G32B32_SINT:
1653        case VK_FORMAT_R32G32B32A32_SINT:
1654        case VK_FORMAT_R64_SINT:
1655        case VK_FORMAT_R64G64_SINT:
1656        case VK_FORMAT_R64G64B64_SINT:
1657        case VK_FORMAT_R64G64B64A64_SINT:
1658        case VK_FORMAT_B8G8R8_SINT:
1659        case VK_FORMAT_B8G8R8A8_SINT:
1660        case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1661        case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1662        case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1663            return FORMAT_TYPE_SINT;
1664        case VK_FORMAT_R8_UINT:
1665        case VK_FORMAT_R8G8_UINT:
1666        case VK_FORMAT_R8G8B8_UINT:
1667        case VK_FORMAT_R8G8B8A8_UINT:
1668        case VK_FORMAT_R16_UINT:
1669        case VK_FORMAT_R16G16_UINT:
1670        case VK_FORMAT_R16G16B16_UINT:
1671        case VK_FORMAT_R16G16B16A16_UINT:
1672        case VK_FORMAT_R32_UINT:
1673        case VK_FORMAT_R32G32_UINT:
1674        case VK_FORMAT_R32G32B32_UINT:
1675        case VK_FORMAT_R32G32B32A32_UINT:
1676        case VK_FORMAT_R64_UINT:
1677        case VK_FORMAT_R64G64_UINT:
1678        case VK_FORMAT_R64G64B64_UINT:
1679        case VK_FORMAT_R64G64B64A64_UINT:
1680        case VK_FORMAT_B8G8R8_UINT:
1681        case VK_FORMAT_B8G8R8A8_UINT:
1682        case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1683        case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1684        case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1685            return FORMAT_TYPE_UINT;
1686        default:
1687            return FORMAT_TYPE_FLOAT;
1688    }
1689}
1690
1691// Characterizes a SPIR-V type appearing in an interface to a fixed-function stage, for comparison to a VkFormat's characterization above.
1692static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1693    auto insn = src->get_def(type);
1694    assert(insn != src->end());
1695
1696    switch (insn.opcode()) {
1697        case spv::OpTypeInt:
1698            return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1699        case spv::OpTypeFloat:
1700            return FORMAT_TYPE_FLOAT;
1701        case spv::OpTypeVector:
1702            return get_fundamental_type(src, insn.word(2));
1703        case spv::OpTypeMatrix:
1704            return get_fundamental_type(src, insn.word(2));
1705        case spv::OpTypeArray:
1706            return get_fundamental_type(src, insn.word(2));
1707        case spv::OpTypePointer:
1708            return get_fundamental_type(src, insn.word(3));
1709        case spv::OpTypeImage:
1710            return get_fundamental_type(src, insn.word(2));
1711
1712        default:
1713            return FORMAT_TYPE_UNDEFINED;
1714    }
1715}
1716
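// Map a single-bit VkShaderStageFlagBits value to a zero-based stage index; u_ffs returns the 1-based position of the lowest
// set bit (as ffs(3) does), hence the -1.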
1717static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1718    uint32_t bit_pos = u_ffs(stage);
1719    return bit_pos - 1;
1720}
1721
1722static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1723    // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
1724    // be specified only once.
1725    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1726    bool pass = true;
1727
1728    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1729        auto desc = &vi->pVertexBindingDescriptions[i];
1730        auto &binding = bindings[desc->binding];
1731        if (binding) {
1732            // TODO: VALIDATION_ERROR_02105 perhaps?
1733            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1734                        SHADER_CHECKER_INCONSISTENT_VI, "SC", "Duplicate vertex input binding descriptions for binding %d",
1735                        desc->binding)) {
1736                pass = false;
1737            }
1738        } else {
1739            binding = desc;
1740        }
1741    }
1742
1743    return pass;
1744}
1745
1746static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1747                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1748    bool pass = true;
1749
1750    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1751
1752    // Build index by location
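    // An attribute whose format consumes multiple locations (e.g. VK_FORMAT_R64G64B64A64_SFLOAT -- see
    // get_locations_consumed_by_format above) is registered at every location it occupies.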
1753    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1754    if (vi) {
1755        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1756            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1757            for (auto j = 0u; j < num_locations; j++) {
1758                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1759            }
1760        }
1761    }
1762
1763    auto it_a = attribs.begin();
1764    auto it_b = inputs.begin();
1765    bool used = false;
1766
1767    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1768        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1769        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1770        auto a_first = a_at_end ? 0 : it_a->first;
1771        auto b_first = b_at_end ? 0 : it_b->first.first;
1772        if (!a_at_end && (b_at_end || a_first < b_first)) {
1773            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1774                                 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1775                                 "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
1776                pass = false;
1777            }
1778            used = false;
1779            it_a++;
1780        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1781            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1782                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d which is not provided",
1783                        b_first)) {
1784                pass = false;
1785            }
1786            it_b++;
1787        } else {
1788            unsigned attrib_type = get_format_type(it_a->second->format);
1789            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1790
1791            // Type checking
1792            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1793                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1794                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1795                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1796                            string_VkFormat(it_a->second->format), a_first, describe_type(vs, it_b->second.type_id).c_str())) {
1797                    pass = false;
1798                }
1799            }
1800
1801            // OK!
1802            used = true;
1803            it_b++;
1804        }
1805    }
1806
1807    return pass;
1808}
1809
1810static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1811                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1812                                                    uint32_t subpass_index) {
1813    std::map<uint32_t, VkFormat> color_attachments;
1814    auto subpass = rpci->pSubpasses[subpass_index];
1815    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1816        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1817        if (attachment == VK_ATTACHMENT_UNUSED) continue;
1818        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1819            color_attachments[i] = rpci->pAttachments[attachment].format;
1820        }
1821    }
1822
1823    bool pass = true;
1824
1825    // TODO: dual source blend index (spv::DecorationIndex, zero if not provided)
1826
1827    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1828
1829    auto it_a = outputs.begin();
1830    auto it_b = color_attachments.begin();
1831
1832    // Walk attachment list and outputs together
1833
1834    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1835        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1836        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1837
1838        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1839            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1840                        SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1841                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
1842                pass = false;
1843            }
1844            it_a++;
1845        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1846            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1847                        SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader", it_b->first)) {
1848                pass = false;
1849            }
1850            it_b++;
1851        } else {
1852            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1853            unsigned att_type = get_format_type(it_b->second);
1854
1855            // Type checking
1856            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1857                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
1858                            SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1859                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
1860                            string_VkFormat(it_b->second), describe_type(fs, it_a->second.type_id).c_str())) {
1861                    pass = false;
1862                }
1863            }
1864
1865            // OK!
1866            it_a++;
1867            it_b++;
1868        }
1869    }
1870
1871    return pass;
1872}
1873
1874// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
1875// important for identifying the set of shader resources actually used by an entrypoint, for example.
1876// Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
1877//  - NOT the shader input/output interfaces.
1878//
1879// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1880// converting parts of this to be generated from the machine-readable spec instead.
1881static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1882    std::unordered_set<uint32_t> ids;
1883    std::unordered_set<uint32_t> worklist;
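    // Seed the worklist with the entrypoint's function id (word 2 of OpEntryPoint).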
1884    worklist.insert(entrypoint.word(2));
1885
1886    while (!worklist.empty()) {
1887        auto id_iter = worklist.begin();
1888        auto id = *id_iter;
1889        worklist.erase(id_iter);
1890
1891        auto insn = src->get_def(id);
1892        if (insn == src->end()) {
1893            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
1894            // that we may not care about.
1895            continue;
1896        }
1897
1898        // Try to add to the output set
1899        if (!ids.insert(id).second) {
1900            continue;  // If we already saw this id, we don't want to walk it again.
1901        }
1902
1903        switch (insn.opcode()) {
1904            case spv::OpFunction:
1905                // Scan whole body of the function, enlisting anything interesting
1906                while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1907                    switch (insn.opcode()) {
1908                        case spv::OpLoad:
1909                        case spv::OpAtomicLoad:
1910                        case spv::OpAtomicExchange:
1911                        case spv::OpAtomicCompareExchange:
1912                        case spv::OpAtomicCompareExchangeWeak:
1913                        case spv::OpAtomicIIncrement:
1914                        case spv::OpAtomicIDecrement:
1915                        case spv::OpAtomicIAdd:
1916                        case spv::OpAtomicISub:
1917                        case spv::OpAtomicSMin:
1918                        case spv::OpAtomicUMin:
1919                        case spv::OpAtomicSMax:
1920                        case spv::OpAtomicUMax:
1921                        case spv::OpAtomicAnd:
1922                        case spv::OpAtomicOr:
1923                        case spv::OpAtomicXor:
1924                            worklist.insert(insn.word(3));  // ptr
1925                            break;
1926                        case spv::OpStore:
1927                        case spv::OpAtomicStore:
1928                            worklist.insert(insn.word(1));  // ptr
1929                            break;
1930                        case spv::OpAccessChain:
1931                        case spv::OpInBoundsAccessChain:
1932                            worklist.insert(insn.word(3));  // base ptr
1933                            break;
1934                        case spv::OpSampledImage:
1935                        case spv::OpImageSampleImplicitLod:
1936                        case spv::OpImageSampleExplicitLod:
1937                        case spv::OpImageSampleDrefImplicitLod:
1938                        case spv::OpImageSampleDrefExplicitLod:
1939                        case spv::OpImageSampleProjImplicitLod:
1940                        case spv::OpImageSampleProjExplicitLod:
1941                        case spv::OpImageSampleProjDrefImplicitLod:
1942                        case spv::OpImageSampleProjDrefExplicitLod:
1943                        case spv::OpImageFetch:
1944                        case spv::OpImageGather:
1945                        case spv::OpImageDrefGather:
1946                        case spv::OpImageRead:
1947                        case spv::OpImage:
1948                        case spv::OpImageQueryFormat:
1949                        case spv::OpImageQueryOrder:
1950                        case spv::OpImageQuerySizeLod:
1951                        case spv::OpImageQuerySize:
1952                        case spv::OpImageQueryLod:
1953                        case spv::OpImageQueryLevels:
1954                        case spv::OpImageQuerySamples:
1955                        case spv::OpImageSparseSampleImplicitLod:
1956                        case spv::OpImageSparseSampleExplicitLod:
1957                        case spv::OpImageSparseSampleDrefImplicitLod:
1958                        case spv::OpImageSparseSampleDrefExplicitLod:
1959                        case spv::OpImageSparseSampleProjImplicitLod:
1960                        case spv::OpImageSparseSampleProjExplicitLod:
1961                        case spv::OpImageSparseSampleProjDrefImplicitLod:
1962                        case spv::OpImageSparseSampleProjDrefExplicitLod:
1963                        case spv::OpImageSparseFetch:
1964                        case spv::OpImageSparseGather:
1965                        case spv::OpImageSparseDrefGather:
1966                        case spv::OpImageTexelPointer:
1967                            worklist.insert(insn.word(3));  // Image or sampled image
1968                            break;
1969                        case spv::OpImageWrite:
1970                            worklist.insert(insn.word(1));  // Image -- different operand order from the cases above
1971                            break;
1972                        case spv::OpFunctionCall:
1973                            for (uint32_t i = 3; i < insn.len(); i++) {
1974                                worklist.insert(insn.word(i));  // fn itself, and all args
1975                            }
1976                            break;
1977
1978                        case spv::OpExtInst:
1979                            for (uint32_t i = 5; i < insn.len(); i++) {
1980                                worklist.insert(insn.word(i));  // Operands to ext inst
1981                            }
1982                            break;
1983                    }
1984                }
1985                break;
1986        }
1987    }
1988
1989    return ids;
1990}
1991
1992static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1993                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
1994                                                          shader_module const *src, spirv_inst_iter type,
1995                                                          VkShaderStageFlagBits stage) {
1996    bool pass = true;
1997
1998    // Strip off ptrs etc
1999    type = get_struct_type(src, type, false);
2000    assert(type != src->end());
2001
2002    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but it is a good first step.
2003    // TODO: arrays, matrices, weird sizes
2004    for (auto insn : *src) {
2005        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2006            if (insn.word(3) == spv::DecorationOffset) {
2007                unsigned offset = insn.word(4);
2008                auto size = 4;  // Bytes; TODO: calculate this based on the type
2009
2010                bool found_range = false;
2011                for (auto const &range : *push_constant_ranges) {
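                    // The member's bytes [offset, offset + size) must be fully contained within this range.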
2012                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2013                        found_range = true;
2014
2015                        if ((range.stageFlags & stage) == 0) {
2016                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2017                                        SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2018                                        "Push constant range covering variable starting at "
2019                                        "offset %u not accessible from stage %s",
2020                                        offset, string_VkShaderStageFlagBits(stage))) {
2021                                pass = false;
2022                            }
2023                        }
2024
2025                        break;
2026                    }
2027                }
2028
2029                if (!found_range) {
2030                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2031                                SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2032                                "Push constant range covering variable starting at "
2033                                "offset %u not declared in layout",
2034                                offset)) {
2035                        pass = false;
2036                    }
2037                }
2038            }
2039        }
2040    }
2041
2042    return pass;
2043}
2044
2045static bool validate_push_constant_usage(debug_report_data *report_data,
2046                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2047                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2048    bool pass = true;
2049
2050    for (auto id : accessible_ids) {
2051        auto def_insn = src->get_def(id);
2052        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2053            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2054                                                                  src->get_def(def_insn.word(1)), stage);
2055        }
2056    }
2057
2058    return pass;
2059}
2060
2061// For given pipelineLayout verify that the set_layout_node at slot.first
2062//  has the requested binding at slot.second and return ptr to that binding
2063static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout,
2064                                                                  descriptor_slot_t slot) {
2065    if (!pipelineLayout) return nullptr;
2066
2067    if (slot.first >= pipelineLayout->set_layouts.size()) return nullptr;
2068
2069    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2070}
2071
2072// Check object status for selected flag state
2073static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2074                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2075    if (!(pNode->status & status_mask)) {
2076        char const *const message = validation_error_map[msg_code];
2077        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2078                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
2079                       "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
2080    }
2081    return false;
2082}
2083
2084// Retrieve pipeline node ptr for given pipeline object
2085static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
2086    auto it = dev_data->pipelineMap.find(pipeline);
2087    if (it == dev_data->pipelineMap.end()) {
2088        return nullptr;
2089    }
2090    return it->second;
2091}
2092
2093RENDER_PASS_STATE *getRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
2094    auto it = dev_data->renderPassMap.find(renderpass);
2095    if (it == dev_data->renderPassMap.end()) {
2096        return nullptr;
2097    }
2098    return it->second.get();
2099}
2100
2101FRAMEBUFFER_STATE *getFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
2102    auto it = dev_data->frameBufferMap.find(framebuffer);
2103    if (it == dev_data->frameBufferMap.end()) {
2104        return nullptr;
2105    }
2106    return it->second.get();
2107}
2108
2109cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *dev_data, VkDescriptorSetLayout dsLayout) {
2110    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
2111    if (it == dev_data->descriptorSetLayoutMap.end()) {
2112        return nullptr;
2113    }
2114    return it->second;
2115}
2116
2117static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
2118    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
2119    if (it == dev_data->pipelineLayoutMap.end()) {
2120        return nullptr;
2121    }
2122    return &it->second;
2123}
2124
2125VkPhysicalDeviceLimits GetPhysicalDeviceLimits(layer_data const *dev_data) {
2126    return dev_data->phys_dev_properties.properties.limits;
2127}
2128
2129// Return true if for a given PSO, the given state enum is dynamic, else return false
2130static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
2131    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2132        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2133            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
2134        }
2135    }
2136    return false;
2137}
2138
2139// Validate state stored as flags at time of draw call
2140static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
2141                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2142    bool result = false;
2143    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2144        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2145         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2146        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2147                                  "Dynamic line width state not set for this command buffer", msg_code);
2148    }
2149    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2150        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2151        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2152                                  "Dynamic depth bias state not set for this command buffer", msg_code);
2153    }
2154    if (pPipe->blendConstantsEnabled) {
2155        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2156                                  "Dynamic blend constants state not set for this command buffer", msg_code);
2157    }
2158    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2159        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2160        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2161                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
2162    }
2163    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2164        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2165        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2166                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
2167        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2168                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
2169        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2170                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
2171    }
2172    if (indexed) {
2173        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2174                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
2175    }
2176
2177    return result;
2178}
2179
2180// Verify attachment reference compatibility according to spec
2181//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
2182//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2183//   to make sure that format and samples counts match.
2184//  If not, they are not compatible.
2185static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2186                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2187                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2188                                             const VkAttachmentDescription *pSecondaryAttachments) {
2189    // Check potential NULL cases first to avoid nullptr issues later
2190    if (pPrimary == nullptr) {
2191        if (pSecondary == nullptr) {
2192            return true;
2193        }
2194        return false;
2195    } else if (pSecondary == nullptr) {
2196        return false;
2197    }
2198    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2199        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
2200    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2201        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
2202    } else {  // Format and sample count must match
2203        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2204            return true;
2205        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2206            return false;
2207        }
2208        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2209             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2210            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2211             pSecondaryAttachments[pSecondary[index].attachment].samples))
2212            return true;
2213    }
2214    // Format and sample counts didn't match
2215    return false;
2216}
2217// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2218// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
2219static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
2220                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2221    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2222        stringstream errorStr;
2223        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2224                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2225        errorMsg = errorStr.str();
2226        return false;
2227    }
2228    uint32_t spIndex = 0;
2229    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2230        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2231        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2232        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2233        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2234        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2235            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2236                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2237                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2238                stringstream errorStr;
2239                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2240                errorMsg = errorStr.str();
2241                return false;
2242            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2243                                                         primaryColorCount, primaryRPCI->pAttachments,
2244                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2245                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2246                stringstream errorStr;
2247                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2248                errorMsg = errorStr.str();
2249                return false;
2250            }
2251        }
2252
2253        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
2254                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2255                                              1, secondaryRPCI->pAttachments)) {
2256            stringstream errorStr;
2257            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2258            errorMsg = errorStr.str();
2259            return false;
2260        }
2261
2262        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2263        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2264        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2265        for (uint32_t i = 0; i < inputMax; ++i) {
2266            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2267                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2268                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2269                stringstream errorStr;
2270                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2271                errorMsg = errorStr.str();
2272                return false;
2273            }
2274        }
2275    }
2276    return true;
2277}
2278
2279// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2280// pipelineLayout[layoutIndex]
2281static bool verify_set_layout_compatibility(layer_data *dev_data, const cvdescriptorset::DescriptorSet *descriptor_set,
2282                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2283                                            string &errorMsg) {
2284    auto num_sets = pipeline_layout->set_layouts.size();
2285    if (layoutIndex >= num_sets) {
2286        stringstream errorStr;
2287        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2288                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2289                 << layoutIndex;
2290        errorMsg = errorStr.str();
2291        return false;
2292    }
2293    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2294    return descriptor_set->IsCompatible(layout_node, &errorMsg);
2295}
2296
2297// Validate that data for each specialization entry is fully contained within the buffer.
2298static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2299    bool pass = true;
2300
2301    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2302
2303    if (spec) {
2304        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2305            // TODO: This is a good place for VALIDATION_ERROR_00589.
2306            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2307                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2308                            VALIDATION_ERROR_00590, "SC",
2309                            "Specialization entry %u (for constant id %u) references memory outside provided "
2310                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2311                            " bytes provided). %s.",
2312                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2313                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
2314                            validation_error_map[VALIDATION_ERROR_00590])) {
2315                    pass = false;
2316                }
2317            }
2318        }
2319    }
2320
2321    return pass;
2322}
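
// Illustrative failing case (not part of the layer): entry 0 below covers bytes 12..19,
// but only 16 bytes of specialization data are provided, so the check above reports
// VALIDATION_ERROR_00590:
//     const VkSpecializationMapEntry entry = {7 /*constantID*/, 12 /*offset*/, 8 /*size*/};
//     const uint8_t data[16] = {};
//     const VkSpecializationInfo spec = {1, &entry, sizeof(data), data};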
2323
2324static bool descriptor_type_match(shader_module const *module, uint32_t type_id, VkDescriptorType descriptor_type,
2325                                  unsigned &descriptor_count) {
2326    auto type = module->get_def(type_id);
2327
2328    descriptor_count = 1;
2329
2330    // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
2331    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2332        if (type.opcode() == spv::OpTypeArray) {
2333            descriptor_count *= get_constant_value(module, type.word(3));
2334            type = module->get_def(type.word(2));
2335        } else {
2336            type = module->get_def(type.word(3));
2337        }
2338    }
2339
2340    switch (type.opcode()) {
2341        case spv::OpTypeStruct: {
2342            for (auto insn : *module) {
2343                if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2344                    if (insn.word(2) == spv::DecorationBlock) {
2345                        return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2346                               descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2347                    } else if (insn.word(2) == spv::DecorationBufferBlock) {
2348                        return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2349                               descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2350                    }
2351                }
2352            }
2353
2354            // Invalid
2355            return false;
2356        }
2357
2358        case spv::OpTypeSampler:
2359            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER || descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2360
2361        case spv::OpTypeSampledImage:
2362            if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2363                // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
2364                // buffer descriptor doesn't really provide one. Allow this slight mismatch.
2365                auto image_type = module->get_def(type.word(2));
2366                auto dim = image_type.word(3);
2367                auto sampled = image_type.word(7);
2368                return dim == spv::DimBuffer && sampled == 1;
2369            }
2370            return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2371
2372        case spv::OpTypeImage: {
2373            // Many descriptor types can back an image type -- which one depends on the dimension and on whether the image will be used with a sampler.
2374            // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
2375            auto dim = type.word(3);
2376            auto sampled = type.word(7);
2377
2378            if (dim == spv::DimSubpassData) {
2379                return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2380            } else if (dim == spv::DimBuffer) {
2381                if (sampled == 1) {
2382                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2383                } else {
2384                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2385                }
2386            } else if (sampled == 1) {
2387                return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2388                       descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2389            } else {
2390                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2391            }
2392        }
2393
2394        // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
2395        default:
2396            return false;  // Mismatch
2397    }
2398}
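
// Walkthrough (illustrative): for a GLSL declaration such as
//     layout(set = 0, binding = 1) uniform sampler2D textures[4];
// the SPIR-V type chain is OpTypePointer -> OpTypeArray(length 4) -> OpTypeSampledImage.
// The loop above strips the pointer and the array (leaving descriptor_count == 4), and
// the OpTypeSampledImage case then accepts VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER.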
2399
2400static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2401    if (!feature) {
2402        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2403                    SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2404                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2405                    "enabled on the device",
2406                    feature_name)) {
2407            return false;
2408        }
2409    }
2410
2411    return true;
2412}
2413
2414static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2415                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2416    bool pass = true;
2417
2418    for (auto insn : *src) {
2419        if (insn.opcode() == spv::OpCapability) {
2420            switch (insn.word(1)) {
2421                case spv::CapabilityMatrix:
2422                case spv::CapabilityShader:
2423                case spv::CapabilityInputAttachment:
2424                case spv::CapabilitySampled1D:
2425                case spv::CapabilityImage1D:
2426                case spv::CapabilitySampledBuffer:
2427                case spv::CapabilityImageBuffer:
2428                case spv::CapabilityImageQuery:
2429                case spv::CapabilityDerivativeControl:
2430                    // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2431                    break;
2432
2433                case spv::CapabilityGeometry:
2434                    pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2435                    break;
2436
2437                case spv::CapabilityTessellation:
2438                    pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2439                    break;
2440
2441                case spv::CapabilityFloat64:
2442                    pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2443                    break;
2444
2445                case spv::CapabilityInt64:
2446                    pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2447                    break;
2448
2449                case spv::CapabilityTessellationPointSize:
2450                case spv::CapabilityGeometryPointSize:
2451                    pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2452                                            "shaderTessellationAndGeometryPointSize");
2453                    break;
2454
2455                case spv::CapabilityImageGatherExtended:
2456                    pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2457                    break;
2458
2459                case spv::CapabilityStorageImageMultisample:
2460                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample,
2461                                            "shaderStorageImageMultisample");
2462                    break;
2463
2464                case spv::CapabilityUniformBufferArrayDynamicIndexing:
2465                    pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2466                                            "shaderUniformBufferArrayDynamicIndexing");
2467                    break;
2468
2469                case spv::CapabilitySampledImageArrayDynamicIndexing:
2470                    pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2471                                            "shaderSampledImageArrayDynamicIndexing");
2472                    break;
2473
2474                case spv::CapabilityStorageBufferArrayDynamicIndexing:
2475                    pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2476                                            "shaderStorageBufferArrayDynamicIndexing");
2477                    break;
2478
2479                case spv::CapabilityStorageImageArrayDynamicIndexing:
2480                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2481                                            "shaderStorageImageArrayDynamicIndexing");
2482                    break;
2483
2484                case spv::CapabilityClipDistance:
2485                    pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2486                    break;
2487
2488                case spv::CapabilityCullDistance:
2489                    pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2490                    break;
2491
2492                case spv::CapabilityImageCubeArray:
2493                    pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2494                    break;
2495
2496                case spv::CapabilitySampleRateShading:
2497                    pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2498                    break;
2499
2500                case spv::CapabilitySparseResidency:
2501                    pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2502                    break;
2503
2504                case spv::CapabilityMinLod:
2505                    pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2506                    break;
2507
2508                case spv::CapabilitySampledCubeArray:
2509                    pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2510                    break;
2511
2512                case spv::CapabilityImageMSArray:
2513                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample,
2514                                            "shaderStorageImageMultisample");
2515                    break;
2516
2517                case spv::CapabilityStorageImageExtendedFormats:
2518                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2519                                            "shaderStorageImageExtendedFormats");
2520                    break;
2521
2522                case spv::CapabilityInterpolationFunction:
2523                    pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2524                    break;
2525
2526                case spv::CapabilityStorageImageReadWithoutFormat:
2527                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2528                                            "shaderStorageImageReadWithoutFormat");
2529                    break;
2530
2531                case spv::CapabilityStorageImageWriteWithoutFormat:
2532                    pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2533                                            "shaderStorageImageWriteWithoutFormat");
2534                    break;
2535
2536                case spv::CapabilityMultiViewport:
2537                    pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2538                    break;
2539
2540                default:
2541                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2542                                SHADER_CHECKER_BAD_CAPABILITY, "SC", "Shader declares capability %u, which is not supported in Vulkan.",
2543                                insn.word(1)))
2544                        pass = false;
2545                    break;
2546            }
2547        }
2548    }
2549
2550    return pass;
2551}
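
// For instance (illustrative; local names are hypothetical): a module declaring
// `OpCapability Float64` passes this check only if the application enabled the matching
// feature at device creation time:
//     VkPhysicalDeviceFeatures features = {};
//     features.shaderFloat64 = VK_TRUE;
//     device_create_info.pEnabledFeatures = &features;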
2552
2553static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2554    auto type = module->get_def(type_id);
2555
2556    while (true) {
2557        switch (type.opcode()) {
2558            case spv::OpTypeArray:
2559            case spv::OpTypeSampledImage:
2560                type = module->get_def(type.word(2));
2561                break;
2562            case spv::OpTypePointer:
2563                type = module->get_def(type.word(3));
2564                break;
2565            case spv::OpTypeImage: {
2566                auto dim = type.word(3);
2567                auto arrayed = type.word(5);
2568                auto msaa = type.word(6);
2569
2570                switch (dim) {
2571                    case spv::Dim1D:
2572                        return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2573                    case spv::Dim2D:
2574                        return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2575                               (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2576                    case spv::Dim3D:
2577                        return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2578                    case spv::DimCube:
2579                        return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2580                    case spv::DimSubpassData:
2581                        return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2582                    default:  // buffer, etc.
2583                        return 0;
2584                }
2585            }
2586            default:
2587                return 0;
2588        }
2589    }
2590}
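
// Example (illustrative): an OpTypeImage with Dim == 2D, arrayed == 1 and msaa == 1
// yields DESCRIPTOR_REQ_MULTI_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY, i.e. the
// descriptor bound to that slot must be a multisampled VK_IMAGE_VIEW_TYPE_2D_ARRAY view.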
2591
2592static bool validate_pipeline_shader_stage(
2593    debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage, PIPELINE_STATE *pipeline,
2594    shader_module **out_module, spirv_inst_iter *out_entrypoint, VkPhysicalDeviceFeatures const *enabledFeatures,
2595    std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2596    bool pass = true;
2597    auto module_it = shaderModuleMap.find(pStage->module);
2598    auto module = *out_module = module_it->second.get();
2599
2600    if (!module->has_valid_spirv) return pass;
2601
2602    // Find the entrypoint
2603    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2604    if (entrypoint == module->end()) {
2605        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, VALIDATION_ERROR_00510,
2606                    "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
2607                    string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510])) {
2608            return false;  // no point continuing beyond here, any analysis is just going to be garbage.
2609        }
2610    }
2611
2612    // Validate shader capabilities against enabled device features
2613    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2614
2615    // Mark accessible ids
2616    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2617
2618    // Validate descriptor set layout against what the entrypoint actually uses
2619    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2620
2621    auto pipelineLayout = pipeline->pipeline_layout;
2622
2623    pass &= validate_specialization_offsets(report_data, pStage);
2624    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2625
2626    // Validate descriptor use
2627    for (auto use : descriptor_uses) {
2628        // While validating shaders, capture which (set, binding) slots are used by the pipeline
2629        auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
2630        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2631
2632        // Verify given pipelineLayout has requested setLayout with requested binding
2633        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2634        unsigned required_descriptor_count;
2635
2636        if (!binding) {
2637            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2638                        SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2639                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2640                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2641                pass = false;
2642            }
2643        } else if (~binding->stageFlags & pStage->stage) {
2644            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2645                        SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2646                        "Shader uses descriptor slot %u.%u (used "
2647                        "as type `%s`) but descriptor not "
2648                        "accessible from stage %s",
2649                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2650                        string_VkShaderStageFlagBits(pStage->stage))) {
2651                pass = false;
2652            }
2653        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType, required_descriptor_count)) {
2654            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2655                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2656                        "Type mismatch on descriptor slot "
2657                        "%u.%u (used as type `%s`) but "
2658                        "descriptor of type %s",
2659                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2660                        string_VkDescriptorType(binding->descriptorType))) {
2661                pass = false;
2662            }
2663        } else if (binding->descriptorCount < required_descriptor_count) {
2664            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2665                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2666                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2667                        required_descriptor_count, use.first.first, use.first.second,
2668                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2669                pass = false;
2670            }
2671        }
2672    }
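
    // Example of the stage-flag test above (illustrative): a binding created with
    // stageFlags == VK_SHADER_STAGE_VERTEX_BIT satisfies (~stageFlags & stage) for
    // stage == VK_SHADER_STAGE_FRAGMENT_BIT, so a fragment shader that uses the binding
    // is reported as SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE.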
2673
2674    // Validate use of input attachments against subpass structure
2675    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2676        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2677
2678        auto rpci = pipeline->render_pass_ci.ptr();
2679        auto subpass = pipeline->graphicsPipelineCI.subpass;
2680
2681        for (auto use : input_attachment_uses) {
2682            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2683            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount)
2684                             ? input_attachments[use.first].attachment
2685                             : VK_ATTACHMENT_UNUSED;
2686
2687            if (index == VK_ATTACHMENT_UNUSED) {
2688                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2689                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2690                            "Shader consumes input attachment index %d but not provided in subpass", use.first)) {
2691                    pass = false;
2692                }
2693            } else if (get_format_type(rpci->pAttachments[index].format) != get_fundamental_type(module, use.second.type_id)) {
2694                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2695                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2696                            "Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first,
2697                            string_VkFormat(rpci->pAttachments[index].format), describe_type(module, use.second.type_id).c_str())) {
2698                    pass = false;
2699                }
2700            }
2701        }
2702    }
2703
2704    return pass;
2705}
2706
2707// Validate the shaders used by the given pipeline, and store the active_slots
2708//  that are actually used by the pipeline into pPipeline->active_slots
2709static bool validate_and_capture_pipeline_shader_state(
2710    debug_report_data *report_data, PIPELINE_STATE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2711    std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2712    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2713    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2714    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2715
2716    shader_module *shaders[5];
2717    memset(shaders, 0, sizeof(shaders));
2718    spirv_inst_iter entrypoints[5];
2719    memset(entrypoints, 0, sizeof(entrypoints));
2720    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2721    bool pass = true;
2722
2723    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2724        auto pStage = &pCreateInfo->pStages[i];
2725        auto stage_id = get_shader_stage_id(pStage->stage);
2726        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline, &shaders[stage_id], &entrypoints[stage_id],
2727                                               enabledFeatures, shaderModuleMap);
2728    }
2729
2730    // if the shader stages are no good individually, cross-stage validation is pointless.
2731    if (!pass) return false;
2732
2733    vi = pCreateInfo->pVertexInputState;
2734
2735    if (vi) {
2736        pass &= validate_vi_consistency(report_data, vi);
2737    }
2738
2739    if (shaders[vertex_stage] && shaders[vertex_stage]->has_valid_spirv) {
2740        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2741    }
2742
2743    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2744    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2745
2746    while (!shaders[producer] && producer != fragment_stage) {
2747        producer++;
2748        consumer++;
2749    }
2750
2751    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2752        assert(shaders[producer]);
2753        if (shaders[consumer] && shaders[consumer]->has_valid_spirv && shaders[producer]->has_valid_spirv) {
2754            pass &= validate_interface_between_stages(report_data, shaders[producer], entrypoints[producer],
2755                                                      &shader_stage_attribs[producer], shaders[consumer], entrypoints[consumer],
2756                                                      &shader_stage_attribs[consumer]);
2757
2758            producer = consumer;
2759        }
2760    }
2761
2762    if (shaders[fragment_stage] && shaders[fragment_stage]->has_valid_spirv) {
2763        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2764                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2765    }
2766
2767    return pass;
2768}
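
// Stage-chaining example (illustrative): for a pipeline with only a VS and an FS,
// producer starts at the vertex stage while consumer walks the empty tessellation and
// geometry slots; the interface check then runs exactly once, directly between the VS
// outputs and the FS inputs.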
2769
2770static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2771                                      VkPhysicalDeviceFeatures const *enabledFeatures,
2772                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2773    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2774
2775    shader_module *module;
2776    spirv_inst_iter entrypoint;
2777
2778    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline, &module, &entrypoint, enabledFeatures,
2779                                          shaderModuleMap);
2780}
2781// Return Set node ptr for specified set or else NULL
2782cvdescriptorset::DescriptorSet *getSetNode(const layer_data *dev_data, VkDescriptorSet set) {
2783    auto set_it = dev_data->setMap.find(set);
2784    if (set_it == dev_data->setMap.end()) {
2785        return NULL;
2786    }
2787    return set_it->second;
2788}
2789
2790// For given pipeline, return number of MSAA samples, or one if MSAA disabled
2791static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
2792    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2793        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2794        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2795    }
2796    return VK_SAMPLE_COUNT_1_BIT;
2797}
2798
2799static void list_bits(std::ostream &s, uint32_t bits) {
2800    for (int i = 0; i < 32 && bits; i++) {
2801        if (bits & (1 << i)) {
2802            s << i;
2803            bits &= ~(1 << i);
2804            if (bits) {
2805                s << ",";
2806            }
2807        }
2808    }
2809}
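
// Usage sketch (illustrative):
//     std::stringstream ss;
//     list_bits(ss, 0x0Bu);  // bits 0, 1 and 3 set
//     assert(ss.str() == "0,1,3");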
2810
2811// Validate draw-time state related to the PSO
2812static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
2813                                          PIPELINE_STATE const *pPipeline) {
2814    bool skip_call = false;
2815
2816    // Verify vertex binding
2817    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2818        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2819            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2820            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2821                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2822                skip_call |=
2823                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2824                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2825                            "The Pipeline State Object (0x%" PRIxLEAST64
2826                            ") expects that this Command Buffer's vertex binding Index %u "
2827                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
2828                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
2829                            (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
2830            }
2831        }
2832    } else {
2833        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
2834            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2835                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2836                                 "Vertex buffers are bound to command buffer (0x%p"
2837                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2838                                 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
2839        }
2840    }
2841    // If viewports or scissors are dynamic, verify that the dynamic count matches the PSO count.
2842    // Skip check if rasterization is disabled or there is no viewport.
2843    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2844         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2845        pPipeline->graphicsPipelineCI.pViewportState) {
2846        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2847        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2848
2849        if (dynViewport) {
2850            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
2851            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
2852            if (missingViewportMask) {
2853                std::stringstream ss;
2854                ss << "Dynamic viewport(s) ";
2855                list_bits(ss, missingViewportMask);
2856                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
2857                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2858                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
2859            }
2860        }
2861
2862        if (dynScissor) {
2863            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
2864            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
2865            if (missingScissorMask) {
2866                std::stringstream ss;
2867                ss << "Dynamic scissor(s) ";
2868                list_bits(ss, missingScissorMask);
2869                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
2870                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2871                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
2872            }
2873        }
2874    }
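
    // Worked example for the mask logic above (illustrative): with viewportCount == 3
    // the required mask is (1 << 3) - 1 == 0b111; if only viewports 0 and 2 were set via
    // vkCmdSetViewport (viewportMask == 0b101), then missingViewportMask == 0b010 and
    // the message lists viewport 1.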
2875
2876    // Verify that any MSAA request in PSO matches sample# in bound FB
2877    // Skip the check if rasterization is disabled.
2878    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2879        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2880        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2881        if (pCB->activeRenderPass) {
2882            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
2883            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2884            uint32_t i;
2885            unsigned subpass_num_samples = 0;
2886
2887            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2888                auto attachment = subpass_desc->pColorAttachments[i].attachment;
2889                if (attachment != VK_ATTACHMENT_UNUSED)
2890                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2891            }
2892
2893            if (subpass_desc->pDepthStencilAttachment &&
2894                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
2895                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
2896                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2897            }
2898
2899            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
2900                skip_call |=
2901                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2902                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2903                            "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
2904                            ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
2905                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2906                            reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2907            }
2908        } else {
2909            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2910                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH,
2911                                 "DS", "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2912                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2913        }
2914    }
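
    // Example (illustrative): a subpass whose color attachments are all
    // VK_SAMPLE_COUNT_4_BIT accumulates subpass_num_samples == 4; a pipeline built with
    // rasterizationSamples == VK_SAMPLE_COUNT_1_BIT then triggers the mismatch above.
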
2915    // Verify that PSO creation renderPass is compatible with active renderPass
2916    if (pCB->activeRenderPass) {
2917        std::string err_string;
2918        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
2919            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
2920                                             err_string)) {
2921            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2922            skip_call |=
2923                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2924                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2925                        "At Draw time the active render pass (0x%" PRIxLEAST64
2926                        ") is incompatible w/ gfx pipeline "
2927                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2928                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
2929                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
2930                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2931        }
2932
2933        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
2934            skip_call |=
2935                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2936                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2937                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
2938                        pCB->activeSubpass);
2939        }
2940    }
2941    // TODO : Add more checks here
2942
2943    return skip_call;
2944}
2945
2946// Validate overall state at the time of a draw call
2947static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
2948                              const VkPipelineBindPoint bind_point, const char *function,
2949                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2950    bool result = false;
2951    auto const &state = cb_node->lastBound[bind_point];
2952    PIPELINE_STATE *pPipe = state.pipeline_state;
2953    if (nullptr == pPipe) {
2954        result |= log_msg(
2955            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2956            DRAWSTATE_INVALID_PIPELINE, "DS",
2957            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2958        // Early return as any further checks below will be busted w/o a pipeline
2959        if (result) return true;
2960    }
2961    // First check flag states
2962    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
2963        result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
2964
2965    // Now complete other state checks
2966    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
2967        string errorString;
2968        auto pipeline_layout = pPipe->pipeline_layout;
2969
2970        for (const auto &set_binding_pair : pPipe->active_slots) {
2971            uint32_t setIndex = set_binding_pair.first;
2972            // If a valid set is not bound, flag an error
2973            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2974                result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2975                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2976                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
2977                                  (uint64_t)pPipe->pipeline, setIndex);
2978            } else if (!verify_set_layout_compatibility(dev_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
2979                                                        errorString)) {
2980                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
2981                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2982                result |=
2983                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2984                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2985                            "VkDescriptorSet (0x%" PRIxLEAST64
2986                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2987                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
2988                            errorString.c_str());
2989            } else {  // Valid set is bound and layout compatible, validate that it's updated
2990                // Pull the set node
2991                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
2992                // Gather active bindings
2993                std::unordered_set<uint32_t> active_bindings;
2994                for (auto binding : set_binding_pair.second) {
2995                    active_bindings.insert(binding.first);
2996                }
2997                // Make sure set has been updated if it has no immutable samplers
2998                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2999                if (!descriptor_set->IsUpdated()) {
3000                    for (auto binding : active_bindings) {
3001                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
3002                            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3003                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)descriptor_set->GetSet(),
3004                                              __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3005                                              "Descriptor Set 0x%" PRIxLEAST64
3006                                              " bound but was never updated. It is now being used to draw so "
3007                                              "this will result in undefined behavior.",
3008                                              (uint64_t)descriptor_set->GetSet());
3009                        }
3010                    }
3011                }
3012                // Validate the draw-time state for this descriptor set
3013                std::string err_str;
3014                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], &err_str)) {
3015                    auto set = descriptor_set->GetSet();
3016                    result |= log_msg(
3017                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3018                        reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3019                        "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
3020                        reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
3021                }
3022            }
3023        }
3024    }
3025
3026    // Check general pipeline state that needs to be validated at drawtime
3027    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);
3028
3029    return result;
3030}
3031
3032static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
3033    auto const &state = cb_state->lastBound[bind_point];
3034    PIPELINE_STATE *pPipe = state.pipeline_state;
3035    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3036        for (const auto &set_binding_pair : pPipe->active_slots) {
3037            uint32_t setIndex = set_binding_pair.first;
3038            // Pull the set node
3039            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
3040            // Bind this set and its active descriptor resources to the command buffer
3041            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
3042            // For given active slots record updated images & buffers
3043            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
3044        }
3045    }
3046    if (pPipe->vertexBindingDescriptions.size() > 0) {
3047        cb_state->vertex_buffer_used = true;
3048    }
3049}
3050
3051// Validate HW line width capabilities prior to setting requested line width.
3052static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3053    bool skip_call = false;
3054
3055    // First check to see if the physical device supports wide lines.
3056    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
3057        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3058                             dsError, "DS",
3059                             "Attempt to set lineWidth to %f but physical device wideLines feature "
3060                             "not supported/enabled so lineWidth must be 1.0f!",
3061                             lineWidth);
3062    } else {
3063        // Otherwise, make sure the width falls in the valid range.
3064        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3065            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3066            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3067                                 __LINE__, dsError, "DS",
3068                                 "Attempt to set lineWidth to %f but physical device limits line width "
3069                                 "to between [%f, %f]!",
3070                                 lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3071                                 dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3072        }
3073    }
3074
3075    return skip_call;
3076}
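
// E.g. (illustrative): with wideLines == VK_FALSE, baking lineWidth = 2.0f into a
// pipeline (or calling vkCmdSetLineWidth(cb, 2.0f) when the state is dynamic) is
// reported here, since only 1.0f is valid without the feature.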
3077
3078// Verify that create state for a pipeline is valid
3079static bool verifyPipelineCreateState(layer_data *dev_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
3080    bool skip_call = false;
3081
3082    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
3083
3084    // If create derivative bit is set, check that we've specified a base
3085    // pipeline correctly, and that the base pipeline was created to allow
3086    // derivatives.
3087    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3088        PIPELINE_STATE *pBasePipeline = nullptr;
3089        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3090              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3091            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
3092            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3093                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3094                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3095        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3096            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3097                skip_call |=
3098                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3099                            VALIDATION_ERROR_00518, "DS",
3100                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
3101                            validation_error_map[VALIDATION_ERROR_00518]);
3102            } else {
3103                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3104            }
3105        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3106            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3107        }
3108
3109        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3110            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3111                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3112                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3113        }
3114    }
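
    // The XOR above encodes "exactly one of handle/index" (illustrative): specifying
    // both basePipelineHandle != VK_NULL_HANDLE and basePipelineIndex != -1 gives
    // (1 ^ 1) == 0, and specifying neither gives (0 ^ 0) == 0; either way the negated
    // result flags the create info as invalid.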
3115
3116    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3117        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3118        auto const render_pass_info = getRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass)->createInfo.ptr();
3119        const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pPipeline->graphicsPipelineCI.subpass];
3120        if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
3121            skip_call |= log_msg(
3122                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3123                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_02109, "DS",
3124                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
3125                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
3126                reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
3127                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
3128                validation_error_map[VALIDATION_ERROR_02109]);
3129        }
3130        if (!dev_data->enabled_features.independentBlend) {
3131            if (pPipeline->attachments.size() > 1) {
3132                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3133                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3134                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3135                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3136                    // only attachment state, so memcmp is best suited for the comparison
3137                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3138                               sizeof(pAttachments[0]))) {
3139                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3140                                             __LINE__, VALIDATION_ERROR_01532, "DS",
3141                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
3142                                             "enabled, all elements of pAttachments must be identical. %s",
3143                                             validation_error_map[VALIDATION_ERROR_01532]);
3144                        break;
3145                    }
3146                }
3147            }
3148        }
3149        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3150            skip_call |=
3151                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3152                        VALIDATION_ERROR_01533, "DS",
3153                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
3154                        validation_error_map[VALIDATION_ERROR_01533]);
3155        }
3156    }
3157
3158    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3159    // produces nonsense errors that confuse users. Other layers should already
3160    // emit errors for renderpass being invalid.
3161    auto renderPass = getRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass);
3162    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3163        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3164                             VALIDATION_ERROR_02122, "DS",
3165                             "Invalid Pipeline CreateInfo State: Subpass index %u "
3166                             "is out of range for this renderpass (0..%u). %s",
3167                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
3168                             validation_error_map[VALIDATION_ERROR_02122]);
3169    }
3170
3171    if (!GetDisables(dev_data)->shader_validation &&
3172        !validate_and_capture_pipeline_shader_state(dev_data->report_data, pPipeline, &dev_data->enabled_features,
3173                                                    dev_data->shaderModuleMap)) {
3174        skip_call = true;
3175    }
3176    // Each shader's stage must be unique
3177    if (pPipeline->duplicate_shaders) {
3178        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3179            if (pPipeline->duplicate_shaders & stage) {
3180                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3181                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3182                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3183                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3184            }
3185        }
3186    }
3187    // VS is required
3188    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3189        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3190                             VALIDATION_ERROR_00532, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
3191                             validation_error_map[VALIDATION_ERROR_00532]);
3192    }
3193    // Either both or neither TC/TE shaders should be defined
3194    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
3195        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3196        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3197                             VALIDATION_ERROR_00534, "DS",
3198                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
3199                             validation_error_map[VALIDATION_ERROR_00534]);
3200    }
3201    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
3202        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3203        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3204                             VALIDATION_ERROR_00535, "DS",
3205                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
3206                             validation_error_map[VALIDATION_ERROR_00535]);
3207    }
3208    // Compute shaders should be specified independent of Gfx shaders
3209    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
3210        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3211                             VALIDATION_ERROR_00533, "DS",
3212                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
3213                             validation_error_map[VALIDATION_ERROR_00533]);
3214    }
3215    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3216    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3217    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3218        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3219         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3220        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3221                             VALIDATION_ERROR_02099, "DS",
3222                             "Invalid Pipeline CreateInfo State: "
3223                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3224                             "topology for tessellation pipelines. %s",
3225                             validation_error_map[VALIDATION_ERROR_02099]);
3226    }
3227    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3228        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3229        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3230            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3231                                 VALIDATION_ERROR_02100, "DS",
3232                                 "Invalid Pipeline CreateInfo State: "
3233                                 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3234                                 "topology is only valid for tessellation pipelines. %s",
3235                                 validation_error_map[VALIDATION_ERROR_02100]);
3236        }
3237    }
3238
3239    if (pPipeline->graphicsPipelineCI.pTessellationState &&
3240        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
3241         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
3242          dev_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
3243        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3244                             VALIDATION_ERROR_01426, "DS",
3245                             "Invalid Pipeline CreateInfo State: "
3246                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3247                             "topology used with patchControlPoints value %u."
3248                             " patchControlPoints should be >0 and <=%u. %s",
3249                             pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
3250                             dev_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
3251                             validation_error_map[VALIDATION_ERROR_01426]);
3252    }
3253
3254    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3255    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3256        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3257            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
3258                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
3259                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3260        }
3261    }
3262
3263    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
3264    // valid structure
3265    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
3266        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3267        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
3268        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
3269            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3270            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
3271                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
3272                                     0, __LINE__, VALIDATION_ERROR_02115, "DS",
3273                                     "Invalid Pipeline CreateInfo State: "
3274                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
3275                                     "depth/stencil attachment. %s",
3276                                     validation_error_map[VALIDATION_ERROR_02115]);
3277            }
3278        }
3279    }
3280    return skip_call;
3281}
3282
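// Illustrative application-side sketch (not part of this layer): a configuration that
// satisfies the tessellation checks above -- both TC and TE stages present, PATCH_LIST
// topology, and patchControlPoints inside the device limit (assuming maxTessellationPatchSize >= 3):
//
//     VkPipelineInputAssemblyStateCreateInfo ia = {};
//     ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
//     ia.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;  // required when TC/TE stages are active
//
//     VkPipelineTessellationStateCreateInfo ts = {};
//     ts.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
//     ts.patchControlPoints = 3;  // must be > 0 and <= limits.maxTessellationPatchSize
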
3283// Free the Pipeline nodes
3284static void deletePipelines(layer_data *dev_data) {
3285    if (dev_data->pipelineMap.empty()) return;
3286    for (auto &pipe_map_pair : dev_data->pipelineMap) {
3287        delete pipe_map_pair.second;
3288    }
3289    dev_data->pipelineMap.clear();
3290}
3291
3292// The block of code that follows is specifically for managing/tracking descriptor sets (DSs)
3293
3294// Return Pool node ptr for specified pool or else NULL
3295DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
3296    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3297    if (pool_it == dev_data->descriptorPoolMap.end()) {
3298        return NULL;
3299    }
3300    return pool_it->second;
3301}
3302
3303// Validate that the given set is valid and that it's not being used by an in-flight CmdBuffer
3304// func_str is the name of the calling function
3305// Return false if no errors occur
3306// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3307static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const std::string &func_str) {
3308    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
3309    bool skip_call = false;
3310    auto set_node = dev_data->setMap.find(set);
3311    if (set_node == dev_data->setMap.end()) {
3312        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3313                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3314                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3315                             (uint64_t)(set));
3316    } else {
3317        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
3318        if (set_node->second->in_use.load()) {
3319            skip_call |=
3320                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3321                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
3322                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
3323                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
3324        }
3325    }
3326    return skip_call;
3327}
3328
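// Typical caller pattern (sketch; exact call sites vary): destroy/free paths invoke this
// check before touching the set, e.g.:
//     skip |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
//     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
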
3329// Remove set from setMap and delete the set
3330static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3331    dev_data->setMap.erase(descriptor_set->GetSet());
3332    delete descriptor_set;
3333}
3334// Free all DS Pools including their Sets & related sub-structs
3335// NOTE : Calls to this function should be wrapped in mutex
3336static void deletePools(layer_data *dev_data) {
3337    if (dev_data->descriptorPoolMap.empty()) return;
3338    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end(); ++ii) {
3339        // Remove this pool's sets from setMap and delete them
3340        for (auto ds : (*ii).second->sets) {
3341            freeDescriptorSet(dev_data, ds);
3342        }
3343        (*ii).second->sets.clear();
3344    }
3345    dev_data->descriptorPoolMap.clear();
3346}
3347
3348static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
3349                                VkDescriptorPoolResetFlags flags) {
3350    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(dev_data, pool);
    if (!pPool) return;  // Guard against an unknown pool handle (getDescriptorPoolState() may return NULL)
3351    // TODO: validate flags
3352    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3353    for (auto ds : pPool->sets) {
3354        freeDescriptorSet(dev_data, ds);
3355    }
3356    pPool->sets.clear();
3357    // Reset available count for each type and available sets for this pool
3358    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3359        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3360    }
3361    pPool->availableSets = pPool->maxSets;
3362}
3363
3364// For a given CB object, fetch the associated CB Node from the map
3365GLOBAL_CB_NODE *getCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
3366    auto it = dev_data->commandBufferMap.find(cb);
3367    if (it == dev_data->commandBufferMap.end()) {
3368        return NULL;
3369    }
3370    return it->second;
3371}
3372// Free all CB Nodes
3373// NOTE : Calls to this function should be wrapped in mutex
3374static void deleteCommandBuffers(layer_data *dev_data) {
3375    if (dev_data->commandBufferMap.empty()) {
3376        return;
3377    }
3378    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
3379        delete (*ii).second;
3380    }
3381    dev_data->commandBufferMap.clear();
3382}
3383
3384static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3385    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3386                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3387                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3388}
3389
3390// If a renderpass is active, verify that the given command type is appropriate for current subpass state
3391bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3392    if (!pCB->activeRenderPass) return false;
3393    bool skip_call = false;
3394    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3395        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3396        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3397                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3398                             "Commands cannot be called in a subpass using secondary command buffers.");
3399    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3400        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3401                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3402                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3403    }
3404    return skip_call;
3405}
3406
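// Example of the rule above (sketch): after
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
// only vkCmdExecuteCommands / vkCmdNextSubpass / vkCmdEndRenderPass are valid in that
// subpass, so recording e.g. vkCmdDraw there is flagged by ValidateCmdSubpassState().
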
3407static bool checkGraphicsBit(const layer_data *dev_data, VkQueueFlags flags, const char *name) {
3408    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3409        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3410                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3411                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3412    return false;
3413}
3414
3415static bool checkComputeBit(const layer_data *dev_data, VkQueueFlags flags, const char *name) {
3416    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3417        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3418                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3419                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3420    return false;
3421}
3422
3423static bool checkGraphicsOrComputeBit(const layer_data *dev_data, VkQueueFlags flags, const char *name) {
3424    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3425        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3426                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3427                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3428    return false;
3429}
3430
3431// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
3432// there's an issue with the Cmd ordering
3433bool ValidateCmd(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3434    bool skip_call = false;
3435    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
3436    if (pPool) {
3437        VkQueueFlags flags = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3438        switch (cmd) {
3439            case CMD_BINDPIPELINE:
3440            case CMD_BINDPIPELINEDELTA:
3441            case CMD_BINDDESCRIPTORSETS:
3442            case CMD_FILLBUFFER:
3443            case CMD_CLEARCOLORIMAGE:
3444            case CMD_SETEVENT:
3445            case CMD_RESETEVENT:
3446            case CMD_WAITEVENTS:
3447            case CMD_BEGINQUERY:
3448            case CMD_ENDQUERY:
3449            case CMD_RESETQUERYPOOL:
3450            case CMD_COPYQUERYPOOLRESULTS:
3451            case CMD_WRITETIMESTAMP:
3452                skip_call |= checkGraphicsOrComputeBit(dev_data, flags, cmdTypeToString(cmd).c_str());
3453                break;
3454            case CMD_SETVIEWPORTSTATE:
3455            case CMD_SETSCISSORSTATE:
3456            case CMD_SETLINEWIDTHSTATE:
3457            case CMD_SETDEPTHBIASSTATE:
3458            case CMD_SETBLENDSTATE:
3459            case CMD_SETDEPTHBOUNDSSTATE:
3460            case CMD_SETSTENCILREADMASKSTATE:
3461            case CMD_SETSTENCILWRITEMASKSTATE:
3462            case CMD_SETSTENCILREFERENCESTATE:
3463            case CMD_BINDINDEXBUFFER:
3464            case CMD_BINDVERTEXBUFFER:
3465            case CMD_DRAW:
3466            case CMD_DRAWINDEXED:
3467            case CMD_DRAWINDIRECT:
3468            case CMD_DRAWINDEXEDINDIRECT:
3469            case CMD_BLITIMAGE:
3470            case CMD_CLEARATTACHMENTS:
3471            case CMD_CLEARDEPTHSTENCILIMAGE:
3472            case CMD_RESOLVEIMAGE:
3473            case CMD_BEGINRENDERPASS:
3474            case CMD_NEXTSUBPASS:
3475            case CMD_ENDRENDERPASS:
3476                skip_call |= checkGraphicsBit(dev_data, flags, cmdTypeToString(cmd).c_str());
3477                break;
3478            case CMD_DISPATCH:
3479            case CMD_DISPATCHINDIRECT:
3480                skip_call |= checkComputeBit(dev_data, flags, cmdTypeToString(cmd).c_str());
3481                break;
3482            case CMD_COPYBUFFER:
3483            case CMD_COPYIMAGE:
3484            case CMD_COPYBUFFERTOIMAGE:
3485            case CMD_COPYIMAGETOBUFFER:
3486            case CMD_CLONEIMAGEDATA:
3487            case CMD_UPDATEBUFFER:
3488            case CMD_PIPELINEBARRIER:
3489            case CMD_EXECUTECOMMANDS:
3490            case CMD_END:
3491                break;
3492            default:
3493                break;
3494        }
3495    }
3496    if (pCB->state != CB_RECORDING) {
3497        skip_call |= report_error_no_cb_begin(dev_data, pCB->commandBuffer, caller_name);
3498    } else {
3499        skip_call |= ValidateCmdSubpassState(dev_data, pCB, cmd);
3500    }
3501    return skip_call;
3502}
3503
3504void UpdateCmdBufferLastCmd(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
3505    if (cb_state->state == CB_RECORDING) {
3506        cb_state->last_cmd = cmd;
3507    }
3508}
3509// For a given object struct, return a BASE_NODE pointer to its wrapping state struct
3510BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
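    // VK_OBJECT carries every handle as a raw uint64_t; each case below reinterprets that
    // storage as the strongly-typed Vulkan handle its state-map lookup expects.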
3511    BASE_NODE *base_ptr = nullptr;
3512    switch (object_struct.type) {
3513        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
3514            base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
3515            break;
3516        }
3517        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
3518            base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
3519            break;
3520        }
3521        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
3522            base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
3523            break;
3524        }
3525        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
3526            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
3527            break;
3528        }
3529        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
3530            base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
3531            break;
3532        }
3533        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
3534            base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
3535            break;
3536        }
3537        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
3538            base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
3539            break;
3540        }
3541        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
3542            base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
3543            break;
3544        }
3545        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
3546            base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
3547            break;
3548        }
3549        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
3550            base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
3551            break;
3552        }
3553        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
3554            base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
3555            break;
3556        }
3557        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
3558            base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
3559            break;
3560        }
3561        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
3562            base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
3563            break;
3564        }
3565        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
3566            base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
3567            break;
3568        }
3569        default:
3570            // TODO : Any other objects to be handled here?
3571            assert(0);
3572            break;
3573    }
3574    return base_ptr;
3575}
3576
3577// Tie the VK_OBJECT to the cmd buffer which includes:
3578//  Add object_binding to cmd buffer
3579//  Add cb_binding to object
3580static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
3581    cb_bindings->insert(cb_node);
3582    cb_node->object_bindings.insert(obj);
3583}
3584// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
3585static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
3586    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
3587    if (base_obj) base_obj->cb_bindings.erase(cb_node);
3588}
3589// Reset the command buffer state
3590//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3591static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3592    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3593    if (pCB) {
3594        pCB->in_use.store(0);
3595        pCB->last_cmd = CMD_NONE;
3596        // Reset CB state (note that createInfo is not cleared)
3597        pCB->commandBuffer = cb;
3598        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3599        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3600        pCB->numCmds = 0;
3601        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3602        pCB->state = CB_NEW;
3603        pCB->submitCount = 0;
3604        pCB->status = 0;
3605        pCB->viewportMask = 0;
3606        pCB->scissorMask = 0;
3607
3608        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3609            pCB->lastBound[i].reset();
3610        }
3611
3612        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3613        pCB->activeRenderPass = nullptr;
3614        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3615        pCB->activeSubpass = 0;
3616        pCB->broken_bindings.clear();
3617        pCB->waitedEvents.clear();
3618        pCB->events.clear();
3619        pCB->writeEventsBeforeWait.clear();
3620        pCB->waitedEventsBeforeQueryReset.clear();
3621        pCB->queryToStateMap.clear();
3622        pCB->activeQueries.clear();
3623        pCB->startedQueries.clear();
3624        pCB->imageSubresourceMap.clear();
3625        pCB->imageLayoutMap.clear();
3626        pCB->eventToStageMap.clear();
3627        pCB->drawData.clear();
3628        pCB->currentDrawData.buffers.clear();
3629        pCB->vertex_buffer_used = false;
3630        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3631        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3632        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3633            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3634        }
3635        pCB->secondaryCommandBuffers.clear();
3636        pCB->updateImages.clear();
3637        pCB->updateBuffers.clear();
3638        clear_cmd_buf_and_mem_references(dev_data, pCB);
3639        pCB->eventUpdates.clear();
3640        pCB->queryUpdates.clear();
3641
3642        // Remove object bindings
3643        for (auto obj : pCB->object_bindings) {
3644            removeCommandBufferBinding(dev_data, &obj, pCB);
3645        }
3646        pCB->object_bindings.clear();
3647        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3648        for (auto framebuffer : pCB->framebuffers) {
3649            auto fb_state = getFramebufferState(dev_data, framebuffer);
3650            if (fb_state) fb_state->cb_bindings.erase(pCB);
3651        }
3652        pCB->framebuffers.clear();
3653        pCB->activeFramebuffer = VK_NULL_HANDLE;
3654    }
3655}
3656
3657// Set PSO-related status bits for CB, including dynamic state set via PSO
3658static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
3659    // Account for any dynamic state not set via this PSO
3660    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3661        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
3662        pCB->status |= CBSTATUS_ALL_STATE_SET;
3663    } else {
3664        // First consider all state on
3665        // Then unset any state that's noted as dynamic in PSO
3666        // Finally OR that into CB statemask
3667        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
3668        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3669            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3670                case VK_DYNAMIC_STATE_LINE_WIDTH:
3671                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3672                    break;
3673                case VK_DYNAMIC_STATE_DEPTH_BIAS:
3674                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3675                    break;
3676                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3677                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3678                    break;
3679                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3680                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3681                    break;
3682                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3683                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3684                    break;
3685                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3686                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3687                    break;
3688                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3689                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3690                    break;
3691                default:
3692                    // TODO : Flag error here
3693                    break;
3694            }
3695        }
3696        pCB->status |= psoDynStateMask;
3697    }
3698}
3699
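// Example (sketch): a pipeline listing VK_DYNAMIC_STATE_LINE_WIDTH in pDynamicStates
// leaves CBSTATUS_LINE_WIDTH_SET clear when bound; the bit is only set once the app
// records a vkCmdSetLineWidth() call on this command buffer.
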
3700// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
3701// render pass.
3702bool insideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3703    bool inside = false;
3704    if (pCB->activeRenderPass) {
3705        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3706                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
3707                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
3708                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
3709    }
3710    return inside;
3711}
3712
3713// Flags validation error if the associated call is made outside a render pass. The apiName
3714// routine should ONLY be called inside a render pass.
3715bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3716    bool outside = false;
3717    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3718        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3719         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3720        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3721                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
3722                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
3723    }
3724    return outside;
3725}
3726
3727static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3728    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3729}
3730
3731static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
3732    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3733        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
3734            instance_data->surfaceExtensionEnabled = true;
3735        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
3736            instance_data->displayExtensionEnabled = true;
3737#ifdef VK_USE_PLATFORM_ANDROID_KHR
3738        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
3739            instance_data->androidSurfaceExtensionEnabled = true;
3740#endif
3741#ifdef VK_USE_PLATFORM_MIR_KHR
3742        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
3743            instance_data->mirSurfaceExtensionEnabled = true;
3744#endif
3745#ifdef VK_USE_PLATFORM_WAYLAND_KHR
3746        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
3747            instance_data->waylandSurfaceExtensionEnabled = true;
3748#endif
3749#ifdef VK_USE_PLATFORM_WIN32_KHR
3750        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
3751            instance_data->win32SurfaceExtensionEnabled = true;
3752#endif
3753#ifdef VK_USE_PLATFORM_XCB_KHR
3754        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
3755            instance_data->xcbSurfaceExtensionEnabled = true;
3756#endif
3757#ifdef VK_USE_PLATFORM_XLIB_KHR
3758        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
3759            instance_data->xlibSurfaceExtensionEnabled = true;
3760#endif
3761    }
3762}
3763
3764VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
3765                                              VkInstance *pInstance) {
3766    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3767
3768    assert(chain_info->u.pLayerInfo);
3769    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3770    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3771    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
3772
3773    // Advance the link info for the next element on the chain
3774    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3775
3776    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3777    if (result != VK_SUCCESS) return result;
3778
3779    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
3780    instance_data->instance = *pInstance;
3781    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
3782    instance_data->report_data = debug_report_create_instance(
3783        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
3784    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
3785    init_core_validation(instance_data, pAllocator);
3786
3787    ValidateLayerOrdering(*pCreateInfo);
3788
3789    return result;
3790}
3791
3792// Hook DestroyInstance to clean up this instance's layer data and logging callbacks
3793VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3794    // TODOSC : Shouldn't need any customization here
3795    dispatch_key key = get_dispatch_key(instance);
3796    // TBD: Need any locking this early, in case this function is called at the
3797    // same time by more than one thread?
3798    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
3799    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
3800
3801    std::lock_guard<std::mutex> lock(global_lock);
3802    // Clean up logging callback, if any
3803    while (instance_data->logging_callback.size() > 0) {
3804        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
3805        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
3806        instance_data->logging_callback.pop_back();
3807    }
3808
3809    layer_debug_report_destroy_instance(instance_data->report_data);
3810    instance_layer_data_map.erase(key);  // This key was allocated in instance_layer_data_map, not layer_data_map
3811}
3812
3813static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3815    // TBD: Need any locking, in case this function is called at the same time by more than one thread?
3816    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3817    dev_data->device_extensions.wsi_enabled = false;
3818    dev_data->device_extensions.wsi_display_swapchain_enabled = false;
3819    dev_data->device_extensions.nv_glsl_shader_enabled = false;
3820
3821    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3822        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
3823            dev_data->device_extensions.wsi_enabled = true;
3824        }
3825        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0) {
3826            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
3827        }
3828        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_NV_GLSL_SHADER_EXTENSION_NAME) == 0) {
3829            dev_data->device_extensions.nv_glsl_shader_enabled = true;
3830        }
3831    }
3832}
3833
3834// Verify that queue family has been properly requested
3835static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
3836                                                   const VkDeviceCreateInfo *create_info) {
3837    bool skip_call = false;
3838    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
3839    // First, check whether the app has actually requested queueFamilyProperties
3840    if (!physical_device_state) {
3841        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3842                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
3843                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
3844    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
3845        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
3846        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
3847                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
3848                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
3849    } else {
3850        // Check that the requested queue properties are valid
3851        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
3852            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
3853            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
3854                skip_call |= log_msg(
3855                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3856                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3857                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
3858            } else if (create_info->pQueueCreateInfos[i].queueCount >
3859                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
3860                skip_call |= log_msg(
3861                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3862                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
3863                    "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
3864                    "requested queueCount is %u.",
3865                    requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
3866                    create_info->pQueueCreateInfos[i].queueCount);
3867            }
3868        }
3869    }
3870    return skip_call;
3871}
3872
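// Example of requests the check above rejects (sketch): on a device exposing a single
// queue family with queueCount == 1, a VkDeviceQueueCreateInfo with queueFamilyIndex == 2
// trips the out-of-range error, and queueFamilyIndex == 0 with queueCount == 4 trips the
// queue-count error.
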
3873// Verify that features have been queried and that they are available
3874static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys,
3875                                      const VkPhysicalDeviceFeatures *requested_features) {
3876    bool skip_call = false;
3877
3878    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
3879    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
3880    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
3881    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
3882    //  Need to provide the struct member name with the issue. To do that seems like we'll
3883    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
3884    uint32_t errors = 0;
3885    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
3886    for (uint32_t i = 0; i < total_bools; i++) {
3887        if (requested[i] > actual[i]) {
3888            // TODO: Add index to struct member name helper to be able to include a feature name
3889            skip_call |=
3890                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
3891                        __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
3892                        "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
3893                        "which is not available on this device.",
3894                        i);
3895            errors++;
3896        }
3897    }
3898    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
3899        // If user didn't request features, notify them that they should
3900        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
3901        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
3902                             0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
3903                             "You requested features that are unavailable on this device. You should first query feature "
3904                             "availability by calling vkGetPhysicalDeviceFeatures().");
3905    }
3906    return skip_call;
3907}
3908
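// Application-side pattern the check above encourages (illustrative sketch; variable
// names are hypothetical): query first, then enable only what the device supports:
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.tessellationShader = supported.tessellationShader;
//     device_create_info.pEnabledFeatures = &enabled;
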
3909VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3910                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3911    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
3912    bool skip_call = false;
3913
3914    // Check that any requested features are available
3915    if (pCreateInfo->pEnabledFeatures) {
3916        skip_call |= ValidateRequestedFeatures(instance_data, gpu, pCreateInfo->pEnabledFeatures);
3917    }
3918    skip_call |= ValidateRequestedQueueFamilyProperties(instance_data, gpu, pCreateInfo);
3919
3920    if (skip_call) {
3921        return VK_ERROR_VALIDATION_FAILED_EXT;
3922    }
3923
3924    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3925
3926    assert(chain_info->u.pLayerInfo);
3927    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3928    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3929    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
3930    if (fpCreateDevice == NULL) {
3931        return VK_ERROR_INITIALIZATION_FAILED;
3932    }
3933
3934    // Advance the link info for the next element on the chain
3935    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3936
3937    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3938    if (result != VK_SUCCESS) {
3939        return result;
3940    }
3941
3942    std::unique_lock<std::mutex> lock(global_lock);
3943    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
3944
3945    device_data->instance_data = instance_data;
3946    // Setup device dispatch table
3947    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
3948    device_data->device = *pDevice;
3949    // Save PhysicalDevice handle
3950    device_data->physical_device = gpu;
3951
3952    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
3953    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
3954    // Get physical device limits for this device
3955    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
3956    uint32_t count;
3957    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
3958    device_data->phys_dev_properties.queue_family_properties.resize(count);
3959    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
3960        gpu, &count, device_data->phys_dev_properties.queue_family_properties.data());
3961    // TODO: device limits should make sure these are compatible
3962    if (pCreateInfo->pEnabledFeatures) {
3963        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
3964    } else {
3965        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
3966    }
3967    // Store physical device properties and physical device mem limits into device layer_data structs
3968    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
3969    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
3970    lock.unlock();
3971
3972    ValidateLayerOrdering(*pCreateInfo);
3973
3974    return result;
3975}
3976
3978VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
3979    // TODOSC : Shouldn't need any customization here
3980    bool skip = false;
3981    dispatch_key key = get_dispatch_key(device);
3982    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
3983    // Free all the memory
3984    std::unique_lock<std::mutex> lock(global_lock);
3985    deletePipelines(dev_data);
3986    dev_data->renderPassMap.clear();
3987    deleteCommandBuffers(dev_data);
3988    // This will also delete all sets in the pool & remove them from setMap
3989    deletePools(dev_data);
3990    // All sets should be removed
3991    assert(dev_data->setMap.empty());
3992    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
3993        delete del_layout.second;
3994    }
3995    dev_data->descriptorSetLayoutMap.clear();
3996    dev_data->imageViewMap.clear();
3997    dev_data->imageMap.clear();
3998    dev_data->imageSubresourceMap.clear();
3999    dev_data->imageLayoutMap.clear();
4000    dev_data->bufferViewMap.clear();
4001    dev_data->bufferMap.clear();
4002    // Queues persist until device is destroyed
4003    dev_data->queueMap.clear();
4004    // Report any memory leaks
4005    layer_debug_report_destroy_device(device);
4006    lock.unlock();
4007
4008#if DISPATCH_MAP_DEBUG
4009    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4010#endif
4011    if (!skip) {
4012        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4013        layer_data_map.erase(key);
4014    }
4015}
4016
4017static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4018
4019// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
4020//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
4021static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
4022                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
4023    bool skip = false;
4024    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
4025        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
4026                        geo_error_id, "DL",
4027                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
4028                        "device does not have geometryShader feature enabled. %s",
4029                        caller, validation_error_map[geo_error_id]);
4030    }
4031    if (!dev_data->enabled_features.tessellationShader &&
4032        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
4033        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
4034                        tess_error_id, "DL",
4035                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
4036                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
4037                        "does not have tessellationShader feature enabled. %s",
4038                        caller, validation_error_map[tess_error_id]);
4039    }
4040    return skip;
4041}
4042
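// Example of a violation the check above catches (sketch): recording
//     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
// on a device created without the geometryShader feature reports geo_error_id.
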
4043// Loop through bound objects and increment their in_use counts
4044//  For any unknown objects, flag an error
4045static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4046    bool skip = false;
4047    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4048    BASE_NODE *base_obj = nullptr;
4049    for (auto obj : cb_node->object_bindings) {
4050        switch (obj.type) {
4051            case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4052                base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4053                error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4054                break;
4055            }
4056            case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4057                base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4058                error_code = DRAWSTATE_INVALID_SAMPLER;
4059                break;
4060            }
4061            case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4062                base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4063                error_code = DRAWSTATE_INVALID_QUERY_POOL;
4064                break;
4065            }
4066            case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4067                base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4068                error_code = DRAWSTATE_INVALID_PIPELINE;
4069                break;
4070            }
4071            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4072                base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4073                error_code = DRAWSTATE_INVALID_BUFFER;
4074                break;
4075            }
4076            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4077                base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4078                error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4079                break;
4080            }
4081            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4082                base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4083                error_code = DRAWSTATE_INVALID_IMAGE;
4084                break;
4085            }
4086            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4087                base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4088                error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4089                break;
4090            }
4091            case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4092                base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4093                error_code = DRAWSTATE_INVALID_EVENT;
4094                break;
4095            }
4096            case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4097                base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4098                error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4099                break;
4100            }
4101            case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4102                base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4103                error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4104                break;
4105            }
4106            case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4107                base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4108                error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4109                break;
4110            }
4111            case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4112                base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4113                error_code = DRAWSTATE_INVALID_RENDERPASS;
4114                break;
4115            }
4116            case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4117                base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4118                error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4119                break;
4120            }
4121            default:
4122                // TODO : Merge handling of other objects types into this code
4123                break;
4124        }
4125        if (!base_obj) {
4126            skip |=
4127                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4128                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4129        } else {
4130            base_obj->in_use.fetch_add(1);
4131        }
4132    }
4133    return skip;
4134}
4135
4136// Track which resources are in-flight by atomically incrementing their "in_use" count
4137static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4138    bool skip_call = false;
4139
4140    cb_node->in_use.fetch_add(1);
4141    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4142
4143    // First, increment in_use for all "generic" objects bound to the cmd buffer, followed by special-case objects below
4144    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4145    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4146    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4147    //  should then be flagged prior to calling this function
4148    for (auto drawDataElement : cb_node->drawData) {
4149        for (auto buffer : drawDataElement.buffers) {
4150            auto buffer_state = getBufferState(dev_data, buffer);
4151            if (!buffer_state) {
4152                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4153                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4154                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4155            } else {
4156                buffer_state->in_use.fetch_add(1);
4157            }
4158        }
4159    }
4160    for (auto event : cb_node->writeEventsBeforeWait) {
4161        auto event_state = getEventNode(dev_data, event);
4162        if (event_state) event_state->write_in_use++;
4163    }
4164    return skip_call;
4165}
4166
4167// Note: This function assumes that the global lock is held by the calling thread.
4168// For the given queue, verify the queue state up to the given seq number.
4169// Currently the only check is that if there are events to be waited on prior to
4170//  a QueryReset, all such events have been signalled.
4171static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
4172    bool skip = false;
4173    auto queue_seq = queue->seq;
4174    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
4175    auto sub_it = queue->submissions.begin();
4176    while (queue_seq < seq) {
4177        for (auto &wait : sub_it->waitSemaphores) {
4178            auto &last_seq = other_queue_seqs[wait.queue];
4179            last_seq = std::max(last_seq, wait.seq);
4180        }
4181        for (auto cb : sub_it->cbs) {
4182            auto cb_node = getCBNode(dev_data, cb);
4183            if (cb_node) {
4184                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
4185                    for (auto event : queryEventsPair.second) {
4186                        if (dev_data->eventMap[event].needsSignaled) {
4187                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4188                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY,
                                            "DS",
4189                                            "Cannot get query results on queryPool 0x%" PRIx64
4190                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4191                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4192                        }
4193                    }
4194                }
4195            }
4196        }
4197        sub_it++;
4198        queue_seq++;
4199    }
4200    for (auto qs : other_queue_seqs) {
4201        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
4202    }
4203    return skip;
4204}
4205
4206// When the given fence is retired, verify outstanding queue operations through the point of the fence
4207static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
4208    auto fence_state = getFenceNode(dev_data, fence);
4209    if (VK_NULL_HANDLE != fence_state->signaler.first) {
4210        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
4211    }
4212    return false;
4213}
4214
4215// TODO: nuke this completely.
4216// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4217static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4218    // Decrement the CB's in_use count; once it reaches zero, remove the CB from the global in-flight set
4219    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4220    pCB->in_use.fetch_sub(1);
4221    if (!pCB->in_use.load()) {
4222        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4223    }
4224}
4225
4226// Decrement in-use count for objects bound to command buffer
4227static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4228    BASE_NODE *base_obj = nullptr;
4229    for (auto obj : cb_node->object_bindings) {
4230        base_obj = GetStateStructPtrFromObject(dev_data, obj);
4231        if (base_obj) {
4232            base_obj->in_use.fetch_sub(1);
4233        }
4234    }
4235}
4236
4237static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
4238    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4239
4240    // Roll this queue forward, one submission at a time.
4241    while (pQueue->seq < seq) {
4242        auto &submission = pQueue->submissions.front();
4243
4244        for (auto &wait : submission.waitSemaphores) {
4245            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4246            if (pSemaphore) {
4247                pSemaphore->in_use.fetch_sub(1);
4248            }
4249            auto &lastSeq = otherQueueSeqs[wait.queue];
4250            lastSeq = std::max(lastSeq, wait.seq);
4251        }
4252
4253        for (auto &semaphore : submission.signalSemaphores) {
4254            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4255            if (pSemaphore) {
4256                pSemaphore->in_use.fetch_sub(1);
4257            }
4258        }
4259
4260        for (auto cb : submission.cbs) {
4261            auto cb_node = getCBNode(dev_data, cb);
4262            if (!cb_node) {
4263                continue;
4264            }
4265            // First perform decrement on general case bound objects
4266            DecrementBoundResources(dev_data, cb_node);
4267            for (auto drawDataElement : cb_node->drawData) {
4268                for (auto buffer : drawDataElement.buffers) {
4269                    auto buffer_state = getBufferState(dev_data, buffer);
4270                    if (buffer_state) {
4271                        buffer_state->in_use.fetch_sub(1);
4272                    }
4273                }
4274            }
4275            for (auto event : cb_node->writeEventsBeforeWait) {
4276                auto eventNode = dev_data->eventMap.find(event);
4277                if (eventNode != dev_data->eventMap.end()) {
4278                    eventNode->second.write_in_use--;
4279                }
4280            }
4281            for (auto queryStatePair : cb_node->queryToStateMap) {
4282                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4283            }
4284            for (auto eventStagePair : cb_node->eventToStageMap) {
4285                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4286            }
4287
4288            removeInFlightCmdBuffer(dev_data, cb);
4289        }
4290
4291        auto pFence = getFenceNode(dev_data, submission.fence);
4292        if (pFence) {
4293            pFence->state = FENCE_RETIRED;
4294        }
4295
4296        pQueue->submissions.pop_front();
4297        pQueue->seq++;
4298    }
4299
4300    // Roll other queues forward to the highest seq we saw a wait for
4301    for (auto qs : otherQueueSeqs) {
4302        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
4303    }
4304}
4305
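// Worked example of the seq bookkeeping above (values illustrative, not from a real trace):
// if pQueue->seq is 7 and three submissions are pending, those submissions occupy seqs 8, 9,
// and 10; calling RetireWorkOnQueue(..., 10) pops all three and leaves pQueue->seq == 10.
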
// Submit a fence to a queue, delimiting all previously submitted fences and any
// otherwise-untracked prior work on that queue.
4308static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
4309    pFence->state = FENCE_INFLIGHT;
4310    pFence->signaler.first = pQueue->queue;
4311    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4312}
4313
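// Illustrative sketch (excluded from the build; 'device', 'queue', and 'submit_info' are
// assumed to exist): the application-side fence lifecycle that SubmitFence()/RetireFence()
// model. One submission signals the fence; waiting on it retires the queue's prior work.
#if 0
static void ExampleSubmitWithFence(VkDevice device, VkQueue queue, const VkSubmitInfo &submit_info) {
    VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
    VkFence fence = VK_NULL_HANDLE;
    vkCreateFence(device, &fci, nullptr, &fence);
    vkQueueSubmit(queue, 1, &submit_info, fence);             // fence becomes FENCE_INFLIGHT
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // fence retires; prior queue work verified complete
    vkDestroyFence(device, fence, nullptr);
}
#endif
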
4314static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4315    bool skip_call = false;
4316    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4317        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4318        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4319                             0, __LINE__, VALIDATION_ERROR_00133, "DS",
4320                             "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
4321                             validation_error_map[VALIDATION_ERROR_00133]);
4322    }
4323    return skip_call;
4324}
4325
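// Illustrative sketch (excluded from the build): recording a command buffer with
// SIMULTANEOUS_USE so it may legally be re-submitted while still in flight, avoiding
// VALIDATION_ERROR_00133 above. 'cb' is an assumed, already-allocated command buffer.
#if 0
static void ExampleBeginSimultaneousUse(VkCommandBuffer cb) {
    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
    vkBeginCommandBuffer(cb, &begin_info);
    // ... record work ...
    vkEndCommandBuffer(cb);
}
#endif
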
4326static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4327    bool skip = false;
4328    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
4329    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4330    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4331        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                        "set, but has been submitted %" PRIuLEAST64 " times.",
                        pCB->commandBuffer, pCB->submitCount);
4336    }
4337    // Validate that cmd buffers have been updated
4338    if (CB_RECORDED != pCB->state) {
4339        if (CB_INVALID == pCB->state) {
4340            // Inform app of reason CB invalid
4341            for (auto obj : pCB->broken_bindings) {
4342                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case in that they can invalidate a CB when either destroyed or updated
                const char *cause_str =
                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4346
4347                skip |=
4348                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4349                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4350                            "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
4351                            pCB->commandBuffer, type_str, obj.handle, cause_str);
4352            }
4353        } else {  // Flag error for using CB w/o vkEndCommandBuffer() called
4354            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4355                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4356                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
4357                            call_source);
4358        }
4359    }
4360    return skip;
4361}
4362
4363// Validate that queueFamilyIndices of primary command buffers match this queue
4364// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4365static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4366    bool skip_call = false;
4367    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4368    auto queue_state = getQueueState(dev_data, queue);
4369
4370    if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
4371        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4372                             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
4373                             "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
4374                             "0x%p from queue family %d. %s",
4375                             pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
4376                             validation_error_map[VALIDATION_ERROR_00139]);
4377    }
4378
4379    return skip_call;
4380}
4381
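// Illustrative sketch (excluded from the build): creating the command pool with the same
// queueFamilyIndex later used to retrieve the queue keeps primary CBs from that pool
// submittable on that queue, satisfying the check below. All names are assumptions.
#if 0
static VkCommandPool ExampleCreateMatchingPool(VkDevice device, uint32_t queue_family_index) {
    VkCommandPoolCreateInfo pool_info = {};
    pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    pool_info.queueFamilyIndex = queue_family_index;  // must match the target queue's family
    VkCommandPool pool = VK_NULL_HANDLE;
    vkCreateCommandPool(device, &pool_info, nullptr, &pool);
    return pool;
}
#endif
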
4382static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4383    // Track in-use for resources off of primary and any secondary CBs
4384    bool skip_call = false;
4385
4386    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4387    // on device
4388    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4389
4390    skip_call |= validateAndIncrementResources(dev_data, pCB);
4391
4392    if (!pCB->secondaryCommandBuffers.empty()) {
4393        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4394            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4395            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4396            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4397                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, VALIDATION_ERROR_00135, "DS",
                    "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
                    pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
                    validation_error_map[VALIDATION_ERROR_00135]);
4405            }
4406        }
4407    }
4408
4409    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
4410
4411    return skip_call;
4412}
4413
4414static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
4415    bool skip_call = false;
4416
4417    if (pFence) {
4418        if (pFence->state == FENCE_INFLIGHT) {
4419            // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
4420            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4421                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4422                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4423        }
4424
4425        else if (pFence->state == FENCE_RETIRED) {
4426            // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
4427            skip_call |=
4428                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4429                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
4431                        reinterpret_cast<uint64_t &>(pFence->fence));
4432        }
4433    }
4434
4435    return skip_call;
4436}
4437
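// Illustrative sketch (excluded from the build): resetting a retired (signaled) fence before
// re-use avoids the FENCE_RETIRED error above. 'device', 'queue', 'submit_info', and 'fence'
// are assumed to exist from earlier frames.
#if 0
vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
vkResetFences(device, 1, &fence);              // fence returns to the unsignaled state
vkQueueSubmit(queue, 1, &submit_info, fence);  // now legal: fence is neither in flight nor retired
#endif
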
4438VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4439    bool skip_call = false;
4440    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
4441    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4442    std::unique_lock<std::mutex> lock(global_lock);
4443
4444    auto pQueue = getQueueState(dev_data, queue);
4445    auto pFence = getFenceNode(dev_data, fence);
4446    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4447
4448    if (skip_call) {
4449        return VK_ERROR_VALIDATION_FAILED_EXT;
4450    }
4451
4452    // Mark the fence in-use.
4453    if (pFence) {
4454        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4455    }
4456
4457    // Now verify each individual submit
4458    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4459        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4460        vector<SEMAPHORE_WAIT> semaphore_waits;
4461        vector<VkSemaphore> semaphore_signals;
4462        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4463            skip_call |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
4464                                                      VALIDATION_ERROR_00142, VALIDATION_ERROR_00143);
4465            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4466            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4467            if (pSemaphore) {
4468                if (pSemaphore->signaled) {
4469                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4470                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4471                        pSemaphore->in_use.fetch_add(1);
4472                    }
4473                    pSemaphore->signaler.first = VK_NULL_HANDLE;
4474                    pSemaphore->signaled = false;
4475                } else {
4476                    skip_call |=
4477                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4478                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4479                                "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
4480                                reinterpret_cast<const uint64_t &>(semaphore));
4481                }
4482            }
4483        }
4484        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4485            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4486            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4487            if (pSemaphore) {
4488                if (pSemaphore->signaled) {
4489                    skip_call |=
4490                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4491                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4492                                "Queue 0x%p is signaling semaphore 0x%" PRIx64
4493                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4494                                queue, reinterpret_cast<const uint64_t &>(semaphore),
4495                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
4496                } else {
4497                    pSemaphore->signaler.first = queue;
4498                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
4499                    pSemaphore->signaled = true;
4500                    pSemaphore->in_use.fetch_add(1);
4501                    semaphore_signals.push_back(semaphore);
4502                }
4503            }
4504        }
4505
4506        std::vector<VkCommandBuffer> cbs;
4507
4508        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (cb_node) {
                skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
4512                cbs.push_back(submit->pCommandBuffers[i]);
4513                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
4514                    cbs.push_back(secondaryCmdBuffer);
4515                }
4516
4517                cb_node->submitCount++;  // increment submit count
4518                skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
4519                skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
4520                // Potential early exit here as bad object state may crash in delayed function calls
4521                if (skip_call) return result;
4522                // Call submit-time functions to validate/update state
4523                for (auto &function : cb_node->validate_functions) {
4524                    skip_call |= function();
4525                }
4526                for (auto &function : cb_node->eventUpdates) {
4527                    skip_call |= function(queue);
4528                }
4529                for (auto &function : cb_node->queryUpdates) {
4530                    skip_call |= function(queue);
4531                }
4532            }
4533        }
4534
4535        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
4536                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
4537    }
4538
4539    if (pFence && !submitCount) {
4540        // If no submissions, but just dropping a fence on the end of the queue,
4541        // record an empty submission with just the fence, so we can determine
4542        // its completion.
4543        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
4544                                         fence);
4545    }
4546
4547    lock.unlock();
4548    if (!skip_call) result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
4549
4550    return result;
4551}
4552
4553static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
4554    bool skip = false;
4555    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
4556        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4557                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
4558                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
4559                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
4560                        validation_error_map[VALIDATION_ERROR_00611]);
4561    }
4562    return skip;
4563}
4564
4565static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
4566    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
4567    return;
4568}
4569
4570VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4571                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4572    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4573    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4574    std::unique_lock<std::mutex> lock(global_lock);
4575    bool skip = PreCallValidateAllocateMemory(dev_data);
4576    if (!skip) {
4577        lock.unlock();
4578        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4579        lock.lock();
4580        if (VK_SUCCESS == result) {
4581            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
4582        }
4583    }
4584    return result;
4585}
4586
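// Illustrative sketch (excluded from the build): a typical allocation that this entry point
// validates. 'mem_reqs' and 'chosen_type_index' are assumed to come from
// vkGetBufferMemoryRequirements and the physical-device memory properties, respectively.
#if 0
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = mem_reqs.size;
alloc_info.memoryTypeIndex = chosen_type_index;
VkDeviceMemory memory = VK_NULL_HANDLE;
vkAllocateMemory(device, &alloc_info, nullptr, &memory);
#endif
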
// For given obj node, if it is in use, flag a validation error and return callback result, else return false
4588bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
4589                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
4590    if (dev_data->instance_data->disabled.object_in_use) return false;
4591    bool skip = false;
4592    if (obj_node->in_use.load()) {
4593        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
4594                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
4595                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
4596    }
4597    return skip;
4598}
4599
4600static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
4601    *mem_info = getMemObjInfo(dev_data, mem);
4602    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
4603    if (dev_data->instance_data->disabled.free_memory) return false;
4604    bool skip = false;
4605    if (*mem_info) {
4606        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
4607    }
4608    return skip;
4609}
4610
4611static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
4612    // Clear mem binding for any bound objects
4613    for (auto obj : mem_info->obj_bindings) {
4614        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
4615                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
4616                (uint64_t)mem_info->mem);
4617        switch (obj.type) {
4618            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4619                auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4620                assert(image_state);  // Any destroyed images should already be removed from bindings
4621                image_state->binding.mem = MEMORY_UNBOUND;
4622                break;
4623            }
4624            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4625                auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4626                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
4627                buffer_state->binding.mem = MEMORY_UNBOUND;
4628                break;
4629            }
4630            default:
4631                // Should only have buffer or image objects bound to memory
4632                assert(0);
4633        }
4634    }
4635    // Any bound cmd buffers are now invalid
4636    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
4637    dev_data->memObjMap.erase(mem);
4638}
4639
4640VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4641    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4642    DEVICE_MEM_INFO *mem_info = nullptr;
4643    VK_OBJECT obj_struct;
4644    std::unique_lock<std::mutex> lock(global_lock);
4645    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
4646    if (!skip) {
4647        lock.unlock();
4648        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
4649        lock.lock();
4650        if (mem != VK_NULL_HANDLE) {
4651            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
4652        }
4653    }
4654}
4655
// Validate that the given map-memory range is valid. This means that the memory should not already be mapped,
//  and that the size of the map range should be:
//  1. Not zero
//  2. Within the size of the memory allocation
4660static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4661    bool skip_call = false;
4662
4663    if (size == 0) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                             (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                             "VkMapMemory: Attempting to map memory range of size zero");
4667    }
4668
4669    auto mem_element = dev_data->memObjMap.find(mem);
4670    if (mem_element != dev_data->memObjMap.end()) {
4671        auto mem_info = mem_element->second.get();
4672        // It is an application error to call VkMapMemory on an object that is already mapped
4673        if (mem_info->mem_range.size != 0) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                        "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4677        }
4678
4679        // Validate that offset + size is within object's allocationSize
4680        if (size == VK_WHOLE_SIZE) {
4681            if (offset >= mem_info->alloc_info.allocationSize) {
                skip_call |=
4683                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4684                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4685                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
4686                            " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
4687                            offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
4688            }
4689        } else {
4690            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip_call |= log_msg(
4692                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4693                    (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
4694                    "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s", offset,
4695                    size + offset, mem_info->alloc_info.allocationSize, validation_error_map[VALIDATION_ERROR_00628]);
4696            }
4697        }
4698    }
4699    return skip_call;
4700}
4701
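// Illustrative sketch (excluded from the build): a mapping that satisfies all of the checks
// above -- non-zero size, not already mapped, and offset + size within allocationSize.
// 'device', 'mem', and 'alloc_size' are assumptions.
#if 0
void *data = nullptr;
vkMapMemory(device, mem, /*offset*/ 0, /*size*/ alloc_size, /*flags*/ 0, &data);
// ... write through 'data' ...
vkUnmapMemory(device, mem);
#endif
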
4702static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4703    auto mem_info = getMemObjInfo(dev_data, mem);
4704    if (mem_info) {
4705        mem_info->mem_range.offset = offset;
4706        mem_info->mem_range.size = size;
4707    }
4708}
4709
4710static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
4711    bool skip_call = false;
4712    auto mem_info = getMemObjInfo(dev_data, mem);
4713    if (mem_info) {
4714        if (!mem_info->mem_range.size) {
4715            // Valid Usage: memory must currently be mapped
4716            skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4717                                (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
4718                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
4719                                validation_error_map[VALIDATION_ERROR_00649]);
4720        }
4721        mem_info->mem_range.size = 0;
4722        if (mem_info->shadow_copy) {
4723            free(mem_info->shadow_copy_base);
            mem_info->shadow_copy_base = nullptr;
            mem_info->shadow_copy = nullptr;
4726        }
4727    }
4728    return skip_call;
4729}
4730
// Guard value used to fill pad bytes around non-coherent memory shadow copies
static const char NoncoherentMemoryFillValue = 0xb;
4733
4734static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
4735                                     void **ppData) {
4736    auto mem_info = getMemObjInfo(dev_data, mem);
4737    if (mem_info) {
4738        mem_info->p_driver_data = *ppData;
4739        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
4740        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4741            mem_info->shadow_copy = 0;
4742        } else {
4743            if (size == VK_WHOLE_SIZE) {
4744                size = mem_info->alloc_info.allocationSize - offset;
4745            }
4746            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
4747            assert(vk_safe_modulo(mem_info->shadow_pad_size,
4748                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
4749            // Ensure start of mapped region reflects hardware alignment constraints
4750            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
4751
4752            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
4753            uint64_t start_offset = offset % map_alignment;
4754            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
4755            mem_info->shadow_copy_base =
4756                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
4757
4758            mem_info->shadow_copy =
4759                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
4760                                         ~(map_alignment - 1)) +
4761                start_offset;
4762            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
4763                                  map_alignment) == 0);
4764
4765            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
4766            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
4767        }
4768    }
4769}
4770
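// Layout of the shadow allocation built above (a sketch; proportions not to scale). The app
// writes into the middle span, and the guard bands let the layer detect out-of-bounds writes
// when the shadow copy is later checked against the driver data:
//
//   shadow_copy_base
//   v
//   |-- alignment slack --|-- pad (shadow_pad_size) --|--- user data (size) ---|-- pad --|
//                         ^ shadow_copy                ^ *ppData handed to the app
//
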
4771// Verify that state for fence being waited on is appropriate. That is,
4772//  a fence being waited on should not already be signaled and
4773//  it should have been submitted on a queue or during acquire next image
4774static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
4775    bool skip_call = false;
4776
4777    auto pFence = getFenceNode(dev_data, fence);
4778    if (pFence) {
4779        if (pFence->state == FENCE_UNSIGNALED) {
4780            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4781                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4782                                 "%s called for fence 0x%" PRIxLEAST64
4783                                 " which has not been submitted on a Queue or during "
4784                                 "acquire next image.",
4785                                 apiCall, reinterpret_cast<uint64_t &>(fence));
4786        }
4787    }
4788    return skip_call;
4789}
4790
static void RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = getFenceNode(dev_data, fence);
    if (!pFence) return;  // Unknown fence handle -- nothing to retire
    if (pFence->signaler.first != VK_NULL_HANDLE) {
4794        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
4795        RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
4796    } else {
4797        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
4798        // the fence as retired.
4799        pFence->state = FENCE_RETIRED;
4800    }
4801}
4802
4803static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
4804    if (dev_data->instance_data->disabled.wait_for_fences) return false;
4805    bool skip = false;
4806    for (uint32_t i = 0; i < fence_count; i++) {
4807        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
4808        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
4809    }
4810    return skip;
4811}
4812
4813static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
4814    // When we know that all fences are complete we can clean/remove their CBs
4815    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
4816        for (uint32_t i = 0; i < fence_count; i++) {
4817            RetireFence(dev_data, fences[i]);
4818        }
4819    }
    // NOTE : The alternate case, where only some of the fences have completed, is not handled
    //  here. In that case, for the app to know which fences completed, it will have to call
    //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
4823}
4824
4825VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
4826                                             uint64_t timeout) {
4827    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4828    // Verify fence status of submitted fences
4829    std::unique_lock<std::mutex> lock(global_lock);
4830    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
4831    lock.unlock();
4832    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4833
4834    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4835
4836    if (result == VK_SUCCESS) {
4837        lock.lock();
4838        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
4839        lock.unlock();
4840    }
4841    return result;
4842}
4843
4844static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
4845    if (dev_data->instance_data->disabled.get_fence_state) return false;
4846    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
4847}
4848
4849static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
4850
4851VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
4852    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4853    std::unique_lock<std::mutex> lock(global_lock);
4854    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
4855    lock.unlock();
4856    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4857
4858    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
4859    if (result == VK_SUCCESS) {
4860        lock.lock();
4861        PostCallRecordGetFenceStatus(dev_data, fence);
4862        lock.unlock();
4863    }
4864    return result;
4865}
4866
4867static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
4868    // Add queue to tracking set only if it is new
4869    auto result = dev_data->queues.emplace(queue);
    if (result.second) {
4871        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
4872        queue_state->queue = queue;
4873        queue_state->queueFamilyIndex = q_family_index;
4874        queue_state->seq = 0;
4875    }
4876}
4877
4878VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
4879    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4880    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4881    std::lock_guard<std::mutex> lock(global_lock);
4882
4883    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
4884}
4885
4886static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
4887    *queue_state = getQueueState(dev_data, queue);
4888    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
4889    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
4890}
4891
4892static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
4893    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
4894}
4895
4896VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
4897    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
4898    QUEUE_STATE *queue_state = nullptr;
4899    std::unique_lock<std::mutex> lock(global_lock);
4900    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
4901    lock.unlock();
4902    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4903    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
4904    if (VK_SUCCESS == result) {
4905        lock.lock();
4906        PostCallRecordQueueWaitIdle(dev_data, queue_state);
4907        lock.unlock();
4908    }
4909    return result;
4910}
4911
4912static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
4913    if (dev_data->instance_data->disabled.device_wait_idle) return false;
4914    bool skip = false;
4915    for (auto &queue : dev_data->queueMap) {
4916        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
4917    }
4918    return skip;
4919}
4920
4921static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
4922    for (auto &queue : dev_data->queueMap) {
4923        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
4924    }
4925}
4926
4927VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
4928    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4929    std::unique_lock<std::mutex> lock(global_lock);
4930    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
4931    lock.unlock();
4932    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4933    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
4934    if (VK_SUCCESS == result) {
4935        lock.lock();
4936        PostCallRecordDeviceWaitIdle(dev_data);
4937        lock.unlock();
4938    }
4939    return result;
4940}
4941
4942static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
4943    *fence_node = getFenceNode(dev_data, fence);
4944    *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
4945    if (dev_data->instance_data->disabled.destroy_fence) return false;
4946    bool skip = false;
4947    if (*fence_node) {
4948        if ((*fence_node)->state == FENCE_INFLIGHT) {
4949            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4950                            (uint64_t)(fence), __LINE__, VALIDATION_ERROR_00173, "DS", "Fence 0x%" PRIx64 " is in use. %s",
4951                            (uint64_t)(fence), validation_error_map[VALIDATION_ERROR_00173]);
4952        }
4953    }
4954    return skip;
4955}
4956
4957static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
4958
4959VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
4960    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4961    // Common data objects used pre & post call
4962    FENCE_NODE *fence_node = nullptr;
4963    VK_OBJECT obj_struct;
4964    std::unique_lock<std::mutex> lock(global_lock);
4965    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
4966
4967    if (!skip) {
4968        lock.unlock();
4969        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
4970        lock.lock();
4971        PostCallRecordDestroyFence(dev_data, fence);
4972    }
4973}
4974
4975static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
4976                                            VK_OBJECT *obj_struct) {
4977    *sema_node = getSemaphoreNode(dev_data, semaphore);
4978    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
4979    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
4980    bool skip = false;
4981    if (*sema_node) {
4982        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
4983    }
4984    return skip;
4985}
4986
4987static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
4988
4989VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
4990    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4991    SEMAPHORE_NODE *sema_node;
4992    VK_OBJECT obj_struct;
4993    std::unique_lock<std::mutex> lock(global_lock);
4994    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
4995    if (!skip) {
4996        lock.unlock();
4997        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
4998        lock.lock();
4999        PostCallRecordDestroySemaphore(dev_data, semaphore);
5000    }
5001}
5002
5003static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
5004    *event_state = getEventNode(dev_data, event);
5005    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5006    if (dev_data->instance_data->disabled.destroy_event) return false;
5007    bool skip = false;
5008    if (*event_state) {
5009        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
5010    }
5011    return skip;
5012}
5013
5014static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
5015    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
5016    dev_data->eventMap.erase(event);
5017}
5018
5019VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5020    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5021    EVENT_STATE *event_state = nullptr;
5022    VK_OBJECT obj_struct;
5023    std::unique_lock<std::mutex> lock(global_lock);
5024    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
5025    if (!skip) {
5026        lock.unlock();
5027        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5028        lock.lock();
5029        if (event != VK_NULL_HANDLE) {
5030            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
5031        }
5032    }
5033}
5034
5035static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
5036                                            VK_OBJECT *obj_struct) {
5037    *qp_state = getQueryPoolNode(dev_data, query_pool);
5038    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5039    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
5040    bool skip = false;
5041    if (*qp_state) {
5042        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
5043    }
5044    return skip;
5045}
5046
5047static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
5048                                           VK_OBJECT obj_struct) {
5049    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
5050    dev_data->queryPoolMap.erase(query_pool);
5051}
5052
5053VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5054    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5055    QUERY_POOL_NODE *qp_state = nullptr;
5056    VK_OBJECT obj_struct;
5057    std::unique_lock<std::mutex> lock(global_lock);
5058    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
5059    if (!skip) {
5060        lock.unlock();
5061        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5062        lock.lock();
5063        if (queryPool != VK_NULL_HANDLE) {
5064            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
5065        }
5066    }
}

static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
5069                                               uint32_t query_count, VkQueryResultFlags flags,
5070                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
5071    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
5072        auto cb = getCBNode(dev_data, cmd_buffer);
5073        for (auto query_state_pair : cb->queryToStateMap) {
5074            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
5075        }
5076    }
5077    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
5078    bool skip = false;
5079    for (uint32_t i = 0; i < query_count; ++i) {
5080        QueryObject query = {query_pool, first_query + i};
5081        auto qif_pair = queries_in_flight->find(query);
5082        auto query_state_pair = dev_data->queryToStateMap.find(query);
5083        if (query_state_pair != dev_data->queryToStateMap.end()) {
5084            // Available and in flight
5085            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
5086                query_state_pair->second) {
5087                for (auto cmd_buffer : qif_pair->second) {
5088                    auto cb = getCBNode(dev_data, cmd_buffer);
5089                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
5090                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
5091                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5092                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5093                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5094                                        (uint64_t)(query_pool), first_query + i);
5095                    }
5096                }
5097                // Unavailable and in flight
5098            } else if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
5099                       !query_state_pair->second) {
5100                // TODO : Can there be the same query in use by multiple command buffers in flight?
5101                bool make_available = false;
5102                for (auto cmd_buffer : qif_pair->second) {
5103                    auto cb = getCBNode(dev_data, cmd_buffer);
5104                    make_available |= cb->queryToStateMap[query];
5105                }
5106                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5107                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5108                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5109                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5110                                    (uint64_t)(query_pool), first_query + i);
5111                }
5112                // Unavailable
5113            } else if (query_state_pair != dev_data->queryToStateMap.end() && !query_state_pair->second) {
5114                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
5115                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5116                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5117                                (uint64_t)(query_pool), first_query + i);
            }
        } else {
            // Uninitialized: no state has been collected yet for this query index
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            (uint64_t)(query_pool), first_query + i);
        }
5127    }
5128    return skip;
5129}
5130
5131static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
5132                                              uint32_t query_count,
5133                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
5134    for (uint32_t i = 0; i < query_count; ++i) {
5135        QueryObject query = {query_pool, first_query + i};
5136        auto qif_pair = queries_in_flight->find(query);
5137        auto query_state_pair = dev_data->queryToStateMap.find(query);
5138        if (query_state_pair != dev_data->queryToStateMap.end()) {
5139            // Available and in flight
5140            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
5141                query_state_pair->second) {
5142                for (auto cmd_buffer : qif_pair->second) {
5143                    auto cb = getCBNode(dev_data, cmd_buffer);
5144                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
5145                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
5146                        for (auto event : query_event_pair->second) {
5147                            dev_data->eventMap[event].needsSignaled = true;
5148                        }
5149                    }
5150                }
5151            }
5152        }
5153    }
5154}
5155
5156VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
5157                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
5158    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5159    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
5160    std::unique_lock<std::mutex> lock(global_lock);
5161    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
5162    lock.unlock();
5163    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5164    VkResult result =
5165        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
5166    lock.lock();
5167    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
5168    lock.unlock();
5169    return result;
5170}
5171
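// Illustrative sketch (excluded from the build): fetching results with WAIT_BIT, which the
// in-flight check above accepts as long as some in-flight command buffer will make the
// queries available. 'device', 'query_pool', and 'kQueryCount' are assumptions.
#if 0
uint64_t results[kQueryCount] = {};
vkGetQueryPoolResults(device, query_pool, 0, kQueryCount, sizeof(results), results,
                      sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
#endif
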
5172static bool validateIdleBuffer(const layer_data *dev_data, VkBuffer buffer) {
5173    bool skip_call = false;
5174    auto buffer_state = getBufferState(dev_data, buffer);
5175    if (!buffer_state) {
5176        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5177                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5178                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5179    } else {
5180        if (buffer_state->in_use.load()) {
5181            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5182                                 (uint64_t)(buffer), __LINE__, VALIDATION_ERROR_00676, "DS",
5183                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer. %s", (uint64_t)(buffer),
5184                                 validation_error_map[VALIDATION_ERROR_00676]);
5185        }
5186    }
5187    return skip_call;
5188}
5189
// Return true if given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. A degenerate range should already have
//  been flagged as an error, so that is not re-checked here.
// When one range is linear and the other is non-linear, the comparison is padded out to
//  bufferImageGranularity. In that padded case an encountered alias is reported as a validation
//  warning and *skip_call may be set by the callback, so the caller should merge in *skip_call
//  whenever the padding case is possible.
5196static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5197    *skip_call = false;
5198    auto r1_start = range1->start;
5199    auto r1_end = range1->end;
5200    auto r2_start = range2->start;
5201    auto r2_end = range2->end;
5202    VkDeviceSize pad_align = 1;
5203    if (range1->linear != range2->linear) {
5204        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5205    }
5206    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
5207    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
5208
5209    if (range1->linear != range2->linear) {
5210        // In linear vs. non-linear case, warn of aliasing
5211        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5212        const char *r1_type_str = range1->image ? "image" : "buffer";
5213        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5214        const char *r2_type_str = range2->image ? "image" : "buffer";
5215        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5216        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
5217                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5218                                                                " which may indicate a bug. For further info refer to the "
5219                                                                "Buffer-Image Granularity section of the Vulkan specification. "
5220                                                                "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
5221                                                                "xhtml/vkspec.html#resources-bufferimagegranularity)",
5222                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5223    }
5224    // Ranges intersect
5225    return true;
5226}
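
// Worked example of the padded comparison above (values illustrative): with
// bufferImageGranularity = 0x400, a linear range [0x0, 0x3FF] and a non-linear range
// [0x400, 0x7FF] do not alias (masked ends: 0x0 < 0x400), but a non-linear range starting
// at 0x3FF would mask down to 0x0 and be reported as aliasing the linear range.
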
5227// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5228bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5229    // Create a local MEMORY_RANGE struct to wrap offset/size
5230    MEMORY_RANGE range_wrap;
    // Sync 'linear' with range1 to avoid padding and the potential validation-warning case
5232    range_wrap.linear = range1->linear;
5233    range_wrap.start = offset;
5234    range_wrap.end = end;
5235    bool tmp_bool;
5236    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5237}
5238// For given mem_info, set all ranges valid that intersect [offset-end] range
5239// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5240static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5241    bool tmp_bool = false;
5242    MEMORY_RANGE map_range = {};
5243    map_range.linear = true;
5244    map_range.start = offset;
5245    map_range.end = end;
5246    for (auto &handle_range_pair : mem_info->bound_ranges) {
5247        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5248            // TODO : WARN here if tmp_bool true?
5249            handle_range_pair.second.valid = true;
5250        }
5251    }
5252}
5253// Object with given handle is being bound to memory w/ given mem_info struct.
5254//  Track the newly bound memory range with given memoryOffset
5255//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5256//  and non-linear range incorrectly overlap.
5257// Return true if an error is flagged and the user callback returns "true", otherwise false
5258// is_image indicates an image object, otherwise handle is for a buffer
5259// is_linear indicates a buffer or linear image
5260static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5261                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5262    bool skip_call = false;
5263    MEMORY_RANGE range;
5264
5265    range.image = is_image;
5266    range.handle = handle;
5267    range.linear = is_linear;
5268    range.valid = mem_info->global_valid;
5269    range.memory = mem_info->mem;
5270    range.start = memoryOffset;
5271    range.size = memRequirements.size;
5272    range.end = memoryOffset + memRequirements.size - 1;
5273    range.aliases.clear();
5274    // Update Memory aliasing
5275    // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
5276    // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
5277    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5278    for (auto &obj_range_pair : mem_info->bound_ranges) {
5279        auto check_range = &obj_range_pair.second;
5280        bool intersection_error = false;
5281        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5282            skip_call |= intersection_error;
5283            range.aliases.insert(check_range);
5284            tmp_alias_ranges.insert(check_range);
5285        }
5286    }
5287    mem_info->bound_ranges[handle] = std::move(range);
5288    for (auto tmp_range : tmp_alias_ranges) {
5289        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5290    }
5291    if (is_image)
5292        mem_info->bound_images.insert(handle);
5293    else
5294        mem_info->bound_buffers.insert(handle);
5295
5296    return skip_call;
5297}
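// Worked example of the alias bookkeeping above (illustrative handles and sizes only): binding
// buffer A at [0, 127] and image B at [64, 255] to the same VkDeviceMemory leaves A.aliases == {B}
// and B.aliases == {A}; RemoveMemoryRange() below walks the alias set of the range being erased
// to unlink it from every surviving range.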
5298
5299static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5300                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5301    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5302}
5303
5304static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5305                                    VkMemoryRequirements mem_reqs) {
5306    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5307}
5308
// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
//  is_image indicates if the handle is for an image or a buffer
//  This function also removes the handle from the appropriate bound_images/bound_buffers
//  set and cleans up any aliases for the range being removed.
5313static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5314    auto erase_range = &mem_info->bound_ranges[handle];
5315    for (auto alias_range : erase_range->aliases) {
5316        alias_range->aliases.erase(erase_range);
5317    }
5318    erase_range->aliases.clear();
5319    mem_info->bound_ranges.erase(handle);
5320    if (is_image) {
5321        mem_info->bound_images.erase(handle);
5322    } else {
5323        mem_info->bound_buffers.erase(handle);
5324    }
5325}
5326
5327static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5328
5329void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5330
5331static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
5332                                         VK_OBJECT *obj_struct) {
5333    *buffer_state = getBufferState(dev_data, buffer);
5334    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
5335    if (dev_data->instance_data->disabled.destroy_buffer) return false;
5336    bool skip = false;
5337    if (*buffer_state) {
5338        skip |= validateIdleBuffer(dev_data, buffer);
5339    }
5340    return skip;
5341}
5342
5343static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
5344    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
5345    for (auto mem_binding : buffer_state->GetBoundMemory()) {
5346        auto mem_info = getMemObjInfo(dev_data, mem_binding);
5347        if (mem_info) {
5348            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5349        }
5350    }
5351    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5352    dev_data->bufferMap.erase(buffer_state->buffer);
5353}
5354
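// Standard destroy-object flow used throughout this layer: validate under the global lock, release
// the lock around the down-chain call so the driver is never entered while the lock is held, then
// re-acquire the lock to record the state update.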
5355VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5356    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5357    BUFFER_STATE *buffer_state = nullptr;
5358    VK_OBJECT obj_struct;
5359    std::unique_lock<std::mutex> lock(global_lock);
5360    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
5361    if (!skip) {
5362        lock.unlock();
5363        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
5364        lock.lock();
5365        if (buffer != VK_NULL_HANDLE) {
5366            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
5367        }
5368    }
5369}
5370
5371static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
5372                                             VK_OBJECT *obj_struct) {
5373    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
5374    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
5375    if (dev_data->instance_data->disabled.destroy_buffer_view) return false;
5376    bool skip = false;
5377    if (*buffer_view_state) {
5378        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
5379    }
5380    return skip;
5381}
5382
5383static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
5384                                            VK_OBJECT obj_struct) {
5385    // Any bound cmd buffers are now invalid
5386    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
5387    dev_data->bufferViewMap.erase(buffer_view);
5388}
5389
5390VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5391    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5392    // Common data objects used pre & post call
5393    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
5394    VK_OBJECT obj_struct;
5395    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down the chain; this also fills in the common data used by the post-call record
5397    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
5398    if (!skip) {
5399        lock.unlock();
5400        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
5401        lock.lock();
5402        if (bufferView != VK_NULL_HANDLE) {
5403            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
5404        }
5405    }
5406}
5407
5408VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5409    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5410    IMAGE_STATE *image_state = nullptr;
5411    VK_OBJECT obj_struct;
5412    std::unique_lock<std::mutex> lock(global_lock);
5413    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
5414    if (!skip) {
5415        lock.unlock();
5416        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
5417        lock.lock();
5418        if (image != VK_NULL_HANDLE) {
5419            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
5420        }
5421    }
5422}
5423
5424static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5425                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
5426    bool skip_call = false;
5427    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5428        skip_call =
5429            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5430                    reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
5431                    "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5432                    "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
5433                    funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
5434                    reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
5435    }
5436    return skip_call;
5437}
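// Example of the memoryTypeBits test above (hypothetical values): if vkGetBufferMemoryRequirements()
// reports memoryTypeBits == 0x6 (types 1 and 2 allowed), memory allocated with memoryTypeIndex 1 or 2
// passes, while memoryTypeIndex 0 fails the (1 << index) & bits check and triggers msgCode.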
5438
5439VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5440    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5441    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5442    std::unique_lock<std::mutex> lock(global_lock);
5443    // Track objects tied to memory
5444    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5445    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5446    auto buffer_state = getBufferState(dev_data, buffer);
5447    if (buffer_state) {
5448        if (!buffer_state->memory_requirements_checked) {
            // The spec doesn't explicitly require calling vkGetBufferMemoryRequirements() prior to calling
            //  vkBindBufferMemory(), but it's implied: the memory being bound must conform to the VkMemoryRequirements
            //  returned by vkGetBufferMemoryRequirements()
5452            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5453                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
5454                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
5455                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
5456                                 buffer_handle);
5457            // Make the call for them so we can verify the state
5458            lock.unlock();
5459            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
5460            lock.lock();
5461        }
5462        buffer_state->binding.mem = mem;
5463        buffer_state->binding.offset = memoryOffset;
5464        buffer_state->binding.size = buffer_state->requirements.size;
5465
5466        // Track and validate bound memory range information
5467        auto mem_info = getMemObjInfo(dev_data, mem);
5468        if (mem_info) {
5469            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
5470            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
5471                                             VALIDATION_ERROR_00797);
5472        }
5473
5474        // Validate memory requirements alignment
5475        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
5476            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5477                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_02174, "DS",
5478                                 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
5479                                 " but must be an integer multiple of the "
5480                                 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5481                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
5482                                 memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
5483        }
5484
5485        // Validate device limits alignments
5486        static const VkBufferUsageFlagBits usage_list[3] = {
5487            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5488            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5489        static const char *memory_type[3] = {"texel", "uniform", "storage"};
5490        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
5491                                             "minStorageBufferOffsetAlignment"};
5492
        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
5494        // clang-format off
5495        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_00794, VALIDATION_ERROR_00795,
5496                                                                 VALIDATION_ERROR_00796 };
5497        // clang-format on
5498
        // Unlike the static tables above, this array is intentionally not static: it must be read from the current device's limits
5500        const VkDeviceSize offset_requirement[3] = {
5501            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5502            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5503            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;
5505
5506        for (int i = 0; i < 3; i++) {
5507            if (usage & usage_list[i]) {
5508                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5509                    skip_call |= log_msg(
5510                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5511                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
5512                                                    " but must be a multiple of "
5513                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
5514                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
5515                }
5516            }
5517        }
5518    }
5519    lock.unlock();
5520    if (!skip_call) {
5521        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
5522    }
5523    return result;
5524}
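// Example of the alignment rules enforced above (hypothetical limits): with
// VkMemoryRequirements::alignment == 256, a memoryOffset of 300 fails VALIDATION_ERROR_02174; a
// buffer created with VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT additionally needs memoryOffset to be a
// multiple of minUniformBufferOffsetAlignment, e.g. offset 256 on a device reporting 64 passes both.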
5525
5526VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
5527                                                       VkMemoryRequirements *pMemoryRequirements) {
5528    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5529    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5530    auto buffer_state = getBufferState(dev_data, buffer);
5531    if (buffer_state) {
5532        buffer_state->requirements = *pMemoryRequirements;
5533        buffer_state->memory_requirements_checked = true;
5534    }
5535}
5536
5537VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5538    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5539    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
5540    auto image_state = getImageState(dev_data, image);
5541    if (image_state) {
5542        image_state->requirements = *pMemoryRequirements;
5543        image_state->memory_requirements_checked = true;
5544    }
5545}
5546
5547static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
5548                                            VK_OBJECT *obj_struct) {
5549    *image_view_state = getImageViewState(dev_data, image_view);
5550    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5551    if (dev_data->instance_data->disabled.destroy_image_view) return false;
5552    bool skip = false;
5553    if (*image_view_state) {
5554        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
5555    }
5556    return skip;
5557}
5558
5559static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
5560                                           VK_OBJECT obj_struct) {
5561    // Any bound cmd buffers are now invalid
5562    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
5563    dev_data->imageViewMap.erase(image_view);
5564}
5565
5566VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5567    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5568    // Common data objects used pre & post call
5569    IMAGE_VIEW_STATE *image_view_state = nullptr;
5570    VK_OBJECT obj_struct;
5571    std::unique_lock<std::mutex> lock(global_lock);
5572    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
5573    if (!skip) {
5574        lock.unlock();
5575        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
5576        lock.lock();
5577        if (imageView != VK_NULL_HANDLE) {
5578            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
5579        }
5580    }
5581}
5582
5583VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
5584                                               const VkAllocationCallbacks *pAllocator) {
5585    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5586
5587    std::unique_lock<std::mutex> lock(global_lock);
5588    dev_data->shaderModuleMap.erase(shaderModule);
5589    lock.unlock();
5590
5591    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
5592}
5593
5594static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
5595                                           VK_OBJECT *obj_struct) {
5596    *pipeline_state = getPipelineState(dev_data, pipeline);
5597    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
5598    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
5599    bool skip = false;
5600    if (*pipeline_state) {
5601        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
5602    }
5603    return skip;
5604}
5605
5606static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
5607                                          VK_OBJECT obj_struct) {
5608    // Any bound cmd buffers are now invalid
5609    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
5610    dev_data->pipelineMap.erase(pipeline);
5611}
5612
5613VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5614    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5615    PIPELINE_STATE *pipeline_state = nullptr;
5616    VK_OBJECT obj_struct;
5617    std::unique_lock<std::mutex> lock(global_lock);
5618    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
5619    if (!skip) {
5620        lock.unlock();
5621        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
5622        lock.lock();
5623        if (pipeline != VK_NULL_HANDLE) {
5624            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
5625        }
5626    }
5627}
5628
5629VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
5630                                                 const VkAllocationCallbacks *pAllocator) {
5631    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5632    std::unique_lock<std::mutex> lock(global_lock);
5633    dev_data->pipelineLayoutMap.erase(pipelineLayout);
5634    lock.unlock();
5635
5636    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5637}
5638
5639static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
5640                                          VK_OBJECT *obj_struct) {
5641    *sampler_state = getSamplerState(dev_data, sampler);
5642    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
5643    if (dev_data->instance_data->disabled.destroy_sampler) return false;
5644    bool skip = false;
5645    if (*sampler_state) {
5646        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
5647    }
5648    return skip;
5649}
5650
5651static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
5652                                         VK_OBJECT obj_struct) {
5653    // Any bound cmd buffers are now invalid
5654    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
5655    dev_data->samplerMap.erase(sampler);
5656}
5657
5658VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5659    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5660    SAMPLER_STATE *sampler_state = nullptr;
5661    VK_OBJECT obj_struct;
5662    std::unique_lock<std::mutex> lock(global_lock);
5663    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
5664    if (!skip) {
5665        lock.unlock();
5666        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
5667        lock.lock();
5668        if (sampler != VK_NULL_HANDLE) {
5669            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
5670        }
5671    }
5672}
5673
5674static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
5675    dev_data->descriptorSetLayoutMap.erase(ds_layout);
5676}
5677
5678VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
5679                                                      const VkAllocationCallbacks *pAllocator) {
5680    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5681    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5682    std::unique_lock<std::mutex> lock(global_lock);
5683    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
5684}
5685
5686static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
5687                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
5688    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
5689    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
5690    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
5691    bool skip = false;
5692    if (*desc_pool_state) {
5693        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
5694    }
5695    return skip;
5696}
5697
5698static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
5699                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
5700    // Any bound cmd buffers are now invalid
5701    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
5702    // Free sets that were in this pool
5703    for (auto ds : desc_pool_state->sets) {
5704        freeDescriptorSet(dev_data, ds);
5705    }
5706    dev_data->descriptorPoolMap.erase(descriptorPool);
5707}
5708
5709VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
5710                                                 const VkAllocationCallbacks *pAllocator) {
5711    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5712    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
5713    VK_OBJECT obj_struct;
5714    std::unique_lock<std::mutex> lock(global_lock);
5715    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
5716    if (!skip) {
5717        lock.unlock();
5718        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
5719        lock.lock();
5720        if (descriptorPool != VK_NULL_HANDLE) {
5721            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
5722        }
5723    }
5724}
// Verify that the cmdBuffer in the given cb_node is not in the global in-flight set, and return the skip_call result
//  A secondary command buffer found in the in-flight set is an error only if its primary is also in-flight;
//  a secondary whose primary has retired is not flagged
// This function is only valid at a point when cmdBuffer is being reset or freed
5729static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
5730                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
5731    bool skip_call = false;
5732    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5733        // Primary CB or secondary where primary is also in-flight is an error
5734        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5735            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5736            skip_call |=
5737                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5738                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
5739                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
5740                        validation_error_map[error_code]);
5741        }
5742    }
5743    return skip_call;
5744}
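// Example (assuming the layer adds secondaries to globalInFlightCmdBuffers when their primary is
// submitted): if secondary S was executed by primary P, resetting or freeing S is flagged here only
// while P is still in-flight; once P retires, S is no longer reported even if its handle lingers in
// the set.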
5745
5746// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5747static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
5748                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
5749    bool skip_call = false;
5750    for (auto cmd_buffer : pPool->commandBuffers) {
5751        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5752            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
5753        }
5754    }
5755    return skip_call;
5756}
5757
5758static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5759    for (auto cmd_buffer : pPool->commandBuffers) {
5760        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5761    }
5762}
5763
5764VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
5765                                              const VkCommandBuffer *pCommandBuffers) {
5766    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5767    bool skip_call = false;
5768    std::unique_lock<std::mutex> lock(global_lock);
5769
5770    for (uint32_t i = 0; i < commandBufferCount; i++) {
5771        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in-flight before any of them are freed
5773        if (cb_node) {
5774            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
5775        }
5776    }
5777
5778    if (skip_call) return;
5779
5780    auto pPool = getCommandPoolNode(dev_data, commandPool);
5781    for (uint32_t i = 0; i < commandBufferCount; i++) {
5782        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5783        // Delete CB information structure, and remove from commandBufferMap
5784        if (cb_node) {
5785            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5786            // reset prior to delete for data clean-up
5787            resetCB(dev_data, cb_node->commandBuffer);
5788            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
5789            delete cb_node;
5790        }
5791
5792        // Remove commandBuffer reference from commandPoolMap
5793        pPool->commandBuffers.remove(pCommandBuffers[i]);
5794    }
5795    lock.unlock();
5796
5797    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5798}
5799
5800VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5801                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
5802    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5803
5804    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5805
5806    if (VK_SUCCESS == result) {
5807        std::lock_guard<std::mutex> lock(global_lock);
5808        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5809        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5810    }
5811    return result;
5812}
5813
5814VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5815                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5816    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5817    bool skip = false;
5818    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
5819        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
5820            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
5821                            __LINE__, VALIDATION_ERROR_01006, "DS",
5822                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
5823                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
5824                            validation_error_map[VALIDATION_ERROR_01006]);
5825        }
5826    }
5827
5828    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5829    if (!skip) {
5830        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5831    }
5832    if (result == VK_SUCCESS) {
5833        std::lock_guard<std::mutex> lock(global_lock);
5834        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
5835        qp_node->createInfo = *pCreateInfo;
5836    }
5837    return result;
5838}
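// Note: a common way to hit the check above is creating the device with pEnabledFeatures == nullptr
// (which leaves all features disabled) and then requesting a VK_QUERY_TYPE_PIPELINE_STATISTICS pool.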
5839
5840static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
5841    *cp_state = getCommandPoolNode(dev_data, pool);
5842    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
5843    bool skip = false;
5844    if (*cp_state) {
5845        // Verify that command buffers in pool are complete (not in-flight)
5846        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
5847    }
5848    return skip;
5849}
5850
5851static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Remove the command pool from commandPoolMap only after removing all of its command buffers from the commandBufferMap
5853    clearCommandBuffersInFlight(dev_data, cp_state);
5854    for (auto cb : cp_state->commandBuffers) {
5855        clear_cmd_buf_and_mem_references(dev_data, cb);
5856        auto cb_node = getCBNode(dev_data, cb);
5857        // Remove references to this cb_node prior to delete
5858        // TODO : Need better solution here, resetCB?
5859        for (auto obj : cb_node->object_bindings) {
5860            removeCommandBufferBinding(dev_data, &obj, cb_node);
5861        }
5862        for (auto framebuffer : cb_node->framebuffers) {
5863            auto fb_state = getFramebufferState(dev_data, framebuffer);
5864            if (fb_state) fb_state->cb_bindings.erase(cb_node);
5865        }
5866        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
5867        delete cb_node;                        // delete CB info structure
5868    }
5869    dev_data->commandPoolMap.erase(pool);
5870}
5871
5872// Destroy commandPool along with all of the commandBuffers allocated from that pool
5873VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5874    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5875    COMMAND_POOL_NODE *cp_state = nullptr;
5876    std::unique_lock<std::mutex> lock(global_lock);
5877    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
5878    if (!skip) {
5879        lock.unlock();
5880        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
5881        lock.lock();
5882        if (commandPool != VK_NULL_HANDLE) {
5883            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
5884        }
5885    }
5886}
5887
5888VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5889    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5890    bool skip_call = false;
5891
5892    std::unique_lock<std::mutex> lock(global_lock);
5893    auto pPool = getCommandPoolNode(dev_data, commandPool);
5894    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
5895    lock.unlock();
5896
5897    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
5898
5899    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
5900
5901    // Reset all of the CBs allocated from this pool
5902    if (VK_SUCCESS == result) {
5903        lock.lock();
5904        clearCommandBuffersInFlight(dev_data, pPool);
5905        for (auto cmdBuffer : pPool->commandBuffers) {
5906            resetCB(dev_data, cmdBuffer);
5907        }
5908        lock.unlock();
5909    }
5910    return result;
5911}
5912
5913VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5914    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5915    bool skip_call = false;
5916    std::unique_lock<std::mutex> lock(global_lock);
5917    for (uint32_t i = 0; i < fenceCount; ++i) {
5918        auto pFence = getFenceNode(dev_data, pFences[i]);
5919        if (pFence && pFence->state == FENCE_INFLIGHT) {
5920            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5921                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
5922                                 "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
5923                                 validation_error_map[VALIDATION_ERROR_00183]);
5924        }
5925    }
5926    lock.unlock();
5927
5928    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
5929
5930    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
5931
5932    if (result == VK_SUCCESS) {
5933        lock.lock();
5934        for (uint32_t i = 0; i < fenceCount; ++i) {
5935            auto pFence = getFenceNode(dev_data, pFences[i]);
5936            if (pFence) {
5937                pFence->state = FENCE_UNSIGNALED;
5938            }
5939        }
5940        lock.unlock();
5941    }
5942
5943    return result;
5944}
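// Fence lifecycle as tracked by this layer: FENCE_UNSIGNALED -> FENCE_INFLIGHT while a submission
// references the fence -> back to FENCE_UNSIGNALED via vkResetFences above, or FENCE_RETIRED once
// the work completes (FENCE_RETIRED is also the initial state for fences created with
// VK_FENCE_CREATE_SIGNALED_BIT; see CreateFence below).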
5945
5946// For given cb_nodes, invalidate them and track object causing invalidation
5947void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
5948    for (auto cb_node : cb_nodes) {
5949        if (cb_node->state == CB_RECORDING) {
5950            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5951                    (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5952                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
5953        }
5954        cb_node->state = CB_INVALID;
5955        cb_node->broken_bindings.push_back(obj);
5956    }
5957}
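// Example: destroying a VkFramebuffer that recorded command buffers still reference reaches this
// function via PostCallRecordDestroyFramebuffer() below; each such command buffer is marked
// CB_INVALID and the framebuffer's handle is kept in broken_bindings so later validation can name
// the object that caused the invalidation.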
5958
5959static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
5960                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
5961    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
5962    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
5963    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
5964    bool skip = false;
5965    if (*framebuffer_state) {
5966        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
5967    }
5968    return skip;
5969}
5970
5971static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
5972                                             VK_OBJECT obj_struct) {
5973    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
5974    dev_data->frameBufferMap.erase(framebuffer);
5975}
5976
5977VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5978    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5979    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
5980    VK_OBJECT obj_struct;
5981    std::unique_lock<std::mutex> lock(global_lock);
5982    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
5983    if (!skip) {
5984        lock.unlock();
5985        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
5986        lock.lock();
5987        if (framebuffer != VK_NULL_HANDLE) {
5988            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
5989        }
5990    }
5991}
5992
5993static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
5994                                             VK_OBJECT *obj_struct) {
5995    *rp_state = getRenderPassState(dev_data, render_pass);
5996    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
5997    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
5998    bool skip = false;
5999    if (*rp_state) {
6000        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
6001    }
6002    return skip;
6003}
6004
6005static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
6006                                            VK_OBJECT obj_struct) {
6007    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
6008    dev_data->renderPassMap.erase(render_pass);
6009}
6010
6011VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6012    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6013    RENDER_PASS_STATE *rp_state = nullptr;
6014    VK_OBJECT obj_struct;
6015    std::unique_lock<std::mutex> lock(global_lock);
6016    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
6017    if (!skip) {
6018        lock.unlock();
6019        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
6020        lock.lock();
6021        if (renderPass != VK_NULL_HANDLE) {
6022            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
6023        }
6024    }
6025}
6026
6027VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6028                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6029    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6030    // TODO: Add check for VALIDATION_ERROR_00658
6031    // TODO: Add check for VALIDATION_ERROR_00666
6032    // TODO: Add check for VALIDATION_ERROR_00667
6033    // TODO: Add check for VALIDATION_ERROR_00668
6034    // TODO: Add check for VALIDATION_ERROR_00669
6035    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6036
6037    if (VK_SUCCESS == result) {
6038        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so that needs fixing if/when we want that data to be valid
6040        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
6041    }
6042    return result;
6043}
6044
6045static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6046    bool skip_call = false;
6047    BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
6048    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6049    if (buffer_state) {
6050        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()", VALIDATION_ERROR_02522);
6051        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6052        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6053        skip_call |= ValidateBufferUsageFlags(
6054            dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
6055            VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6056    }
6057    return skip_call;
6058}
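// Sketch of a VkBufferCreateInfo that satisfies both checks above (illustrative values only):
//   VkBufferCreateInfo buffer_ci = {};
//   buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//   buffer_ci.size = 4096;
//   buffer_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;  // avoids VALIDATION_ERROR_00694
// The buffer must also be bound to device memory before vkCreateBufferView() is called
// (VALIDATION_ERROR_02522), unless it was created sparse.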
6059
6060VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6061                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6062    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6063    std::unique_lock<std::mutex> lock(global_lock);
6064    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6065    lock.unlock();
6066    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
6067    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
6068    if (VK_SUCCESS == result) {
6069        lock.lock();
6070        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
6071        lock.unlock();
6072    }
6073    return result;
6074}
6075
6076// Access helper functions for external modules
6077PFN_vkGetPhysicalDeviceFormatProperties GetFormatPropertiesPointer(core_validation::layer_data *device_data) {
6078    return device_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties;
6079}
6080
6081PFN_vkGetPhysicalDeviceImageFormatProperties GetImageFormatPropertiesPointer(core_validation::layer_data *device_data) {
6082    return device_data->instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties;
6083}
6084
6085VkPhysicalDevice GetPhysicalDevice(core_validation::layer_data *device_data) { return device_data->physical_device; }
6086
6087const debug_report_data *GetReportData(core_validation::layer_data *device_data) { return device_data->report_data; }
6088
6089const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
6090    return &device_data->phys_dev_props;
6091}
6092
6093const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
6094
6095std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
6096    return &device_data->imageMap;
6097}
6098
6099std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
6100    return &device_data->imageSubresourceMap;
6101}
6102
6103std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
6104    return &device_data->imageLayoutMap;
6105}
6106
6107VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6108                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6109    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6110    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6111    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
6112    if (!skip) {
6113        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
6114    }
6115    if (VK_SUCCESS == result) {
6116        std::lock_guard<std::mutex> lock(global_lock);
6117        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
6118    }
6119    return result;
6120}
6121
// For the given format, verify that the requested aspect mask makes sense
6123static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
6124                                    const char *func_name) {
6125    bool skip = false;
6126    if (vk_format_is_color(format)) {
6127        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
6128            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6129                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6130                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6131                            validation_error_map[VALIDATION_ERROR_00741]);
6132        } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
6133            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6134                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6135                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6136                            validation_error_map[VALIDATION_ERROR_00741]);
6137        }
6138    } else if (vk_format_is_depth_and_stencil(format)) {
6139        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
6140            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6141                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6142                            "%s: Depth/stencil image formats must have "
6143                            "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
6144                            "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6145                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
6146        } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
6147            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6148                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6149                            "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
6150                            "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6151                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
6152        }
6153    } else if (vk_format_is_depth_only(format)) {
6154        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
6155            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6156                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6157                            "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6158                            validation_error_map[VALIDATION_ERROR_00741]);
6159        } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
6160            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6161                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6162                            "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6163                            validation_error_map[VALIDATION_ERROR_00741]);
6164        }
6165    } else if (vk_format_is_stencil_only(format)) {
6166        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
6167            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6168                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6169                            "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6170                            validation_error_map[VALIDATION_ERROR_00741]);
6171        } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
6172            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6173                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6174                            "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6175                            validation_error_map[VALIDATION_ERROR_00741]);
6176        }
6177    }
6178    return skip;
6179}
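// Example: for a combined depth/stencil format such as VK_FORMAT_D24_UNORM_S8_UINT, aspect masks of
// DEPTH, STENCIL, or DEPTH|STENCIL all pass the checks above, while including
// VK_IMAGE_ASPECT_COLOR_BIT fails; for a color format like VK_FORMAT_R8G8B8A8_UNORM the mask must be
// exactly VK_IMAGE_ASPECT_COLOR_BIT.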
6180
6181bool ValidateImageSubrangeLevelLayerCounts(layer_data *dev_data, const VkImageSubresourceRange &subresourceRange,
6182                                           const char *func_name) {
6183    bool skip = false;
6184    if (subresourceRange.levelCount == 0) {
6185        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6186                        VALIDATION_ERROR_00768, "IMAGE", "%s called with 0 in subresourceRange.levelCount. %s", func_name,
6187                        validation_error_map[VALIDATION_ERROR_00768]);
6188    }
6189    if (subresourceRange.layerCount == 0) {
6190        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6191                        VALIDATION_ERROR_00769, "IMAGE", "%s called with 0 in subresourceRange.layerCount. %s", func_name,
6192                        validation_error_map[VALIDATION_ERROR_00769]);
6193    }
6194    return skip;
6195}
6196
6197static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
6198    bool skip = false;
6199    IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
6200    if (image_state) {
6201        skip |= ValidateImageUsageFlags(
6202            dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6203                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6204            false, -1, "vkCreateImageView()",
6205            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6206        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6207        skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()", VALIDATION_ERROR_02524);
6208        // Checks imported from image layer
6209        if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
6210            std::stringstream ss;
6211            ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
6212               << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
6213            skip |=
6214                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6215                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6216        }
6217        if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
6218            std::stringstream ss;
6219            ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
6220               << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
6221            skip |=
6222                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6223                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6224        }
6225        // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
6226        skip |= ValidateImageSubrangeLevelLayerCounts(dev_data, create_info->subresourceRange, "vkCreateImageView()");
6227
6228        VkImageCreateFlags image_flags = image_state->createInfo.flags;
6229        VkFormat image_format = image_state->createInfo.format;
6230        VkFormat view_format = create_info->format;
6231        VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;
6232
6233        // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
6234        if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
            // Format MUST be compatible with (i.e., in the same format compatibility class as) the format the image was created with
6236            if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
6237                std::stringstream ss;
6238                ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
6239                   << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ")  format "
                   << string_VkFormat(image_format) << ".  Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
6241                   << "can support ImageViews with differing formats but they must be in the same compatibility class.";
6242                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6243                                VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
6244                                validation_error_map[VALIDATION_ERROR_02171]);
6245            }
6246        } else {
6247            // Format MUST be IDENTICAL to the format the image was created with
6248            if (image_format != view_format) {
6249                std::stringstream ss;
6250                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
6251                   << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
                   << ".  Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
6253                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6254                                VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
6255                                validation_error_map[VALIDATION_ERROR_02172]);
6256            }
6257        }
6258
6259        // Validate correct image aspect bits for desired formats and format consistency
6260        skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
6261    }
6262    return skip;
6263}
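
// Illustrative sketch (not part of this layer): the app-side pattern that the
// MUTABLE_FORMAT check above permits.  A VK_FORMAT_R8G8B8A8_UNORM image created with
// VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT may be viewed as VK_FORMAT_R8G8B8A8_SRGB because
// both formats share a compatibility class.  Handle names below are hypothetical.
#if 0  // documentation-only example, excluded from the build
static VkImageView CreateSrgbViewOfUnormImage(VkDevice device, VkImage mutable_unorm_image) {
    VkImageViewCreateInfo view_ci = {};
    view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_ci.image = mutable_unorm_image;  // image was created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
    view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    view_ci.format = VK_FORMAT_R8G8B8A8_SRGB;  // differs from the image format, same compatibility class
    view_ci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    VkImageView view = VK_NULL_HANDLE;
    vkCreateImageView(device, &view_ci, nullptr, &view);  // the checks in this file run before the ICD sees this
    return view;
}
#endif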
6264
6265static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
6266    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
6267    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view]->create_info.subresourceRange,
6268                                 getImageState(dev_data, create_info->image));
6269}
6270
6271VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6272                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6273    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6274    std::unique_lock<std::mutex> lock(global_lock);
6275    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6276    lock.unlock();
6277    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
6278    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
6279    if (VK_SUCCESS == result) {
6280        lock.lock();
6281        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6282        lock.unlock();
6283    }
6284
6285    return result;
6286}
6287
6288VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
6289                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6290    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6291    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6292    if (VK_SUCCESS == result) {
6293        std::lock_guard<std::mutex> lock(global_lock);
6294        auto &fence_node = dev_data->fenceMap[*pFence];
6295        fence_node.fence = *pFence;
6296        fence_node.createInfo = *pCreateInfo;
6297        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6298    }
6299    return result;
6300}
6301
6302// TODO handle pipeline caches
6303VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6304                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6305    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6306    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6307    return result;
6308}
6309
6310VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
6311                                                const VkAllocationCallbacks *pAllocator) {
6312    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6313    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6314}
6315
6316VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
6317                                                    void *pData) {
6318    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6319    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6320    return result;
6321}
6322
6323VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
6324                                                   const VkPipelineCache *pSrcCaches) {
6325    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6326    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6327    return result;
6328}
6329
6330// utility function to set collective state for pipeline
6331void set_pipeline_state(PIPELINE_STATE *pPipe) {
6332    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6333    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6334        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6335            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6336                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6337                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6338                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6339                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6340                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6341                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6342                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6343                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6344                    pPipe->blendConstantsEnabled = true;
6345                }
6346            }
6347        }
6348    }
6349}
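
// Illustrative sketch (not part of this layer): the app-side blend state that makes
// set_pipeline_state() above flag blendConstantsEnabled.  Any blend factor in the
// CONSTANT_COLOR..ONE_MINUS_CONSTANT_ALPHA range consumes the blend-constants state,
// which the app must then supply (via vkCmdSetBlendConstants when
// VK_DYNAMIC_STATE_BLEND_CONSTANTS is dynamic).  Names below are hypothetical.
#if 0  // documentation-only example, excluded from the build
VkPipelineColorBlendAttachmentState att = {};
att.blendEnable = VK_TRUE;
att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;  // triggers blendConstantsEnabled
att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
att.colorBlendOp = VK_BLEND_OP_ADD;
// ... pipeline creation elided ...
const float blend_constants[4] = {0.5f, 0.5f, 0.5f, 1.0f};
vkCmdSetBlendConstants(command_buffer, blend_constants);
#endif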
6350
6351static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
6352                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
6353    bool skip = false;
6354    instance_layer_data *instance_data =
6355        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
6356
6357    for (uint32_t i = 0; i < count; i++) {
6358        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
6359        if (create_infos[i].pVertexInputState != NULL) {
6360            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
6361                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
6362                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
6363                VkFormatProperties properties;
6364                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
6365                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
6366                    skip |= log_msg(
6367                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
6368                        __LINE__, VALIDATION_ERROR_01413, "IMAGE",
6369                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
6370                        "(%s) is not a supported vertex buffer format. %s",
6371                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
6372                }
6373            }
6374        }
6375    }
6376    return skip;
6377}
6378
6379VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6380                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
6381                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
6382    // TODO What to do with pipelineCache?
6383    // The order of operations here is a little convoluted but gets the job done
6384    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
6385    //  2. Create state is then validated (which uses flags setup during shadowing)
6386    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6387    bool skip = false;
6388    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6389    vector<PIPELINE_STATE *> pipe_state(count);
6390    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6391
6392    uint32_t i = 0;
6393    std::unique_lock<std::mutex> lock(global_lock);
6394
6395    for (i = 0; i < count; i++) {
6396        pipe_state[i] = new PIPELINE_STATE;
6397        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
6398        pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6399        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6400    }
6401    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);
6402
6403    if (skip) {
6404        for (i = 0; i < count; i++) {
6405            delete pipe_state[i];
6406            pPipelines[i] = VK_NULL_HANDLE;
6407        }
6408        return VK_ERROR_VALIDATION_FAILED_EXT;
6409    }
6410
6411    lock.unlock();
6412    auto result =
6413        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6414    lock.lock();
6415    for (i = 0; i < count; i++) {
6416        if (pPipelines[i] == VK_NULL_HANDLE) {
6417            delete pipe_state[i];
6418        } else {
6419            pipe_state[i]->pipeline = pPipelines[i];
6420            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
6421        }
6422    }
6423
6424    return result;
6425}
6426
6427VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6428                                                      const VkComputePipelineCreateInfo *pCreateInfos,
6429                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
6430    bool skip = false;
6431
6432    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6433    vector<PIPELINE_STATE *> pPipeState(count);
6434    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6435
6436    uint32_t i = 0;
6437    std::unique_lock<std::mutex> lock(global_lock);
6438    for (i = 0; i < count; i++) {
6439        // TODO: Verify compute stage bits
6440
6441        // Create and initialize internal tracking data structure
6442        pPipeState[i] = new PIPELINE_STATE;
6443        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
6444        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6445
6446        // TODO: Add Compute Pipeline Verification
6447        skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
6448                                           dev_data->shaderModuleMap);
6449        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
6450    }
6451
6452    if (skip) {
6453        for (i = 0; i < count; i++) {
6454            // Clean up any locally allocated data structures
6455            delete pPipeState[i];
6456            pPipelines[i] = VK_NULL_HANDLE;
6457        }
6458        return VK_ERROR_VALIDATION_FAILED_EXT;
6459    }
6460
6461    lock.unlock();
6462    auto result =
6463        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6464    lock.lock();
6465    for (i = 0; i < count; i++) {
6466        if (pPipelines[i] == VK_NULL_HANDLE) {
6467            delete pPipeState[i];
6468        } else {
6469            pPipeState[i]->pipeline = pPipelines[i];
6470            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6471        }
6472    }
6473
6474    return result;
6475}
6476
6477VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6478                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6479    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6480    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6481    if (VK_SUCCESS == result) {
6482        std::lock_guard<std::mutex> lock(global_lock);
6483        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
6484    }
6485    return result;
6486}
6487
6488static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
6489    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
6490    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
6491}
6492
6493static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
6494                                                    VkDescriptorSetLayout set_layout) {
6495    // TODO: Convert this to unique_ptr to avoid leaks
6496    dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
6497}
6498
6499VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6500                                                         const VkAllocationCallbacks *pAllocator,
6501                                                         VkDescriptorSetLayout *pSetLayout) {
6502    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6503    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6504    std::unique_lock<std::mutex> lock(global_lock);
6505    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
6506    if (!skip) {
6507        lock.unlock();
6508        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6509        if (VK_SUCCESS == result) {
6510            lock.lock();
6511            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
6512        }
6513    }
6514    return result;
6515}
6516
6517// Used by CreatePipelineLayout and CmdPushConstants.
6518// Note that the index argument is optional and only used by CreatePipelineLayout.
6519static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6520                                      const char *caller_name, uint32_t index = 0) {
6521    if (dev_data->instance_data->disabled.push_constant_range) return false;
6522    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6523    bool skip_call = false;
6524    // Check that offset + size doesn't exceed the max.
6525    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
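    // Worked example (illustrative): with maxPushConstantsSize = 128, offset = 0xFFFFFFF0, and
    // size = 0x20, the naive test (offset + size > max) wraps to 0x10 and would falsely pass;
    // testing (offset >= max) first, then (size > max - offset), rejects it with no addition at all.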
6526    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6527        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6528        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6529            if (offset >= maxPushConstantsSize) {
6530                skip_call |=
6531                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6532                            VALIDATION_ERROR_00877, "DS",
6533                            "%s call has push constants index %u with offset %u that "
6534                            "exceeds this device's maxPushConstantSize of %u. %s",
6535                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6536            }
6537            if (size > maxPushConstantsSize - offset) {
6538                skip_call |=
6539                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6540                            VALIDATION_ERROR_00880, "DS",
6541                            "%s call has push constants index %u with offset %u and size %u that "
6542                            "exceeds this device's maxPushConstantSize of %u. %s",
6543                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
6544            }
6545        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6546            if (offset >= maxPushConstantsSize) {
6547                skip_call |=
6548                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6549                            VALIDATION_ERROR_00991, "DS",
6550                            "%s call has push constants index %u with offset %u that "
6551                            "exceeds this device's maxPushConstantSize of %u. %s",
6552                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
6553            }
6554            if (size > maxPushConstantsSize - offset) {
6555                skip_call |=
6556                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6557                            VALIDATION_ERROR_00992, "DS",
6558                            "%s call has push constants index %u with offset %u and size %u that "
6559                            "exceeds this device's maxPushConstantSize of %u. %s",
6560                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
6561            }
6562        } else {
6563            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6564                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6565        }
6566    }
6567    // size needs to be non-zero and a multiple of 4.
6568    if ((size == 0) || ((size & 0x3) != 0)) {
6569        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6570            if (size == 0) {
6571                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6572                                     __LINE__, VALIDATION_ERROR_00878, "DS",
6573                                     "%s call has push constants index %u with "
6574                                     "size %u. Size must be greater than zero. %s",
6575                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
6576            }
6577            if (size & 0x3) {
6578                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6579                                     __LINE__, VALIDATION_ERROR_00879, "DS",
6580                                     "%s call has push constants index %u with "
6581                                     "size %u. Size must be a multiple of 4. %s",
6582                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
6583            }
6584        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6585            if (size == 0) {
6586                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6587                                     __LINE__, VALIDATION_ERROR_01000, "DS",
6588                                     "%s call has push constants index %u with "
6589                                     "size %u. Size must be greater than zero. %s",
6590                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
6591            }
6592            if (size & 0x3) {
6593                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6594                                     __LINE__, VALIDATION_ERROR_00990, "DS",
6595                                     "%s call has push constants index %u with "
6596                                     "size %u. Size must be a multiple of 4. %s",
6597                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
6598            }
6599        } else {
6600            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6601                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6602        }
6603    }
6604    // offset needs to be a multiple of 4.
6605    if ((offset & 0x3) != 0) {
6606        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6607            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6608                                 VALIDATION_ERROR_02521, "DS",
6609                                 "%s call has push constants index %u with "
6610                                 "offset %u. Offset must be a multiple of 4. %s",
6611                                 caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
6612        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6613            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6614                                 VALIDATION_ERROR_00989, "DS",
6615                                 "%s call has push constants with "
6616                                 "offset %u. Offset must be a multiple of 4. %s",
6617                                 caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
6618        } else {
6619            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6620                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6621        }
6622    }
6623    return skip_call;
6624}
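
// Illustrative sketch (not part of this layer): a push constant range and update that
// satisfy the rules checked above: offset and size are multiples of 4, size is nonzero,
// and offset + size stays within maxPushConstantsSize.  Names below are hypothetical.
#if 0  // documentation-only example, excluded from the build
VkPushConstantRange range = {};
range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;  // must be nonzero (checked in CreatePipelineLayout below)
range.offset = 4;  // multiple of 4
range.size = 16;   // nonzero multiple of 4; offset + size = 20 fits in any conformant maxPushConstantsSize
// ... pipeline layout creation elided ...
const float data[4] = {0.0f, 1.0f, 2.0f, 3.0f};
vkCmdPushConstants(command_buffer, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, range.offset, range.size, data);
#endif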
6625
6626VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6627                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6628    bool skip_call = false;
6629    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6630    // TODO : Add checks for VALIDATION_ERRORS 865-871
6631    // Push Constant Range checks
6632    uint32_t i, j;
6633    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6634        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6635                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6636        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6637            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6638                                 VALIDATION_ERROR_00882, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
6639                                 validation_error_map[VALIDATION_ERROR_00882]);
6640        }
6641    }
6642    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
6643
6644    // Each range has been validated individually.  Now check for overlap between ranges.
6645    // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
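    // Worked example (illustrative): treating each range as the half-open interval
    // [offset, offset + size), ranges 0:[0, 16) and 1:[8, 24) overlap (minA <= minB and
    // maxA > minB), while 0:[0, 16) and 1:[16, 32) merely touch and do not.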
6646    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6647        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
6648            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
6649            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
6650            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
6651            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
6652            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
6653                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6654                                     __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
6655                                     "vkCreatePipelineLayout() call has push constants with "
6656                                     "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
6657                                     i, minA, maxA, j, minB, maxB);
6658            }
6659        }
6660    }
6661
6662    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6663    if (VK_SUCCESS == result) {
6664        std::lock_guard<std::mutex> lock(global_lock);
6665        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6666        plNode.layout = *pPipelineLayout;
6667        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
6668        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6669            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6670        }
6671        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
6672        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6673            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
6674        }
6675    }
6676    return result;
6677}
6678
6679VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
6680                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
6681    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6682    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6683    if (VK_SUCCESS == result) {
6684        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6685                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6686                    (uint64_t)*pDescriptorPool))
6687            return VK_ERROR_VALIDATION_FAILED_EXT;
6688        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
6689        if (NULL == pNewNode) {
6690            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6691                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6692                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
6693                return VK_ERROR_VALIDATION_FAILED_EXT;
6694        } else {
6695            std::lock_guard<std::mutex> lock(global_lock);
6696            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6697        }
6698    } else {
6699        // TODO: Determine whether any cleanup is needed if pool creation fails
6700    }
6701    return result;
6702}
6703
6704VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
6705                                                   VkDescriptorPoolResetFlags flags) {
6706    // TODO : Add checks for VALIDATION_ERROR_00928
6707    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6708    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
6709    if (VK_SUCCESS == result) {
6710        std::lock_guard<std::mutex> lock(global_lock);
6711        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6712    }
6713    return result;
6714}
6715// Ensure the pool contains enough descriptors and descriptor sets to satisfy
6716// an allocation request. Fills common_data with the total number of descriptors of each type required,
6717// as well as DescriptorSetLayout ptrs used for later update.
6718static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6719                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6720    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
6721    // All state checks for AllocateDescriptorSets are done in a single function
6722    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6723}
6724// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
6725static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6726                                                 VkDescriptorSet *pDescriptorSets,
6727                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6728    // All the updates are contained in a single cvdescriptorset function
6729    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6730                                                   &dev_data->setMap, dev_data);
6731}
6732
6733VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6734                                                      VkDescriptorSet *pDescriptorSets) {
6735    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6736    std::unique_lock<std::mutex> lock(global_lock);
6737    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6738    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6739    lock.unlock();
6740
6741    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
6742
6743    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6744
6745    if (VK_SUCCESS == result) {
6746        lock.lock();
6747        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6748        lock.unlock();
6749    }
6750    return result;
6751}
6752// Verify state before freeing DescriptorSets
6753static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6754                                              const VkDescriptorSet *descriptor_sets) {
6755    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
6756    bool skip_call = false;
6757    // First make sure sets being destroyed are not currently in-use
6758    for (uint32_t i = 0; i < count; ++i) {
6759        if (descriptor_sets[i] != VK_NULL_HANDLE) {
6760            skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6761        }
6762    }
6763
6764    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
6765    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
6766        // Can't Free from a NON_FREE pool
6767        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6768                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
6769                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6770                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
6771                             validation_error_map[VALIDATION_ERROR_00922]);
6772    }
6773    return skip_call;
6774}
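
// Illustrative sketch (not part of this layer): freeing individual sets is only valid for a
// pool created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, which is what the
// check above enforces.  Names below are hypothetical.
#if 0  // documentation-only example, excluded from the build
VkDescriptorPoolSize pool_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 8};
VkDescriptorPoolCreateInfo pool_ci = {};
pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // without this, vkFreeDescriptorSets is invalid
pool_ci.maxSets = 4;
pool_ci.poolSizeCount = 1;
pool_ci.pPoolSizes = &pool_size;
// ... pool creation and descriptor set allocation elided ...
vkFreeDescriptorSets(device, pool, 1, &descriptor_set);  // returns the set's descriptors to the pool's counts
#endif
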
6775// Sets have been removed from the pool so update underlying state
6776static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6777                                             const VkDescriptorSet *descriptor_sets) {
6778    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
6779    // Update available descriptor sets in pool
6780    pool_state->availableSets += count;
6781
6782    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6783    for (uint32_t i = 0; i < count; ++i) {
6784        if (descriptor_sets[i] != VK_NULL_HANDLE) {
6785            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
6786            uint32_t type_index = 0, descriptor_count = 0;
6787            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
6788                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
6789                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
6790                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6791            }
6792            freeDescriptorSet(dev_data, descriptor_set);
6793            pool_state->sets.erase(descriptor_set);
6794        }
6795    }
6796}
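
// Worked example (illustrative): if a freed set held 2 UNIFORM_BUFFER descriptors and
// 1 SAMPLER descriptor, the loop above adds 2 and 1 back to the pool's
// availableDescriptorTypeCount entries for those types, and availableSets has already
// been incremented by the number of freed sets.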
6797
6798VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
6799                                                  const VkDescriptorSet *pDescriptorSets) {
6800    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6801    // Make sure that no sets being destroyed are in-flight
6802    std::unique_lock<std::mutex> lock(global_lock);
6803    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6804    lock.unlock();
6805
6806    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
6807    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6808    if (VK_SUCCESS == result) {
6809        lock.lock();
6810        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6811        lock.unlock();
6812    }
6813    return result;
6814}
6815// TODO : This is a Proof-of-concept for core validation architecture
6816//  Really we'll want to break out these functions to separate files but
6817//  keeping it all together here to prove out design
6818// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6819static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6820                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6821                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6822    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
6823    // First thing to do is perform map look-ups.
6824    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
6825    //  so we can't just do a single map look-up up-front, but do them individually in functions below
6826
6827    // Now make call(s) that validate state, but don't perform state updates in this function
6828    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
6829    //  namespace which will parse params and make calls into specific class instances
6830    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6831                                                         descriptorCopyCount, pDescriptorCopies);
6832}
6833// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6834static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6835                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6836                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6837    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6838                                                 pDescriptorCopies);
6839}
6840
6841VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
6842                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6843                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6844    // Only map look-up at top level is for device-level layer_data
6845    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6846    std::unique_lock<std::mutex> lock(global_lock);
6847    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6848                                                         pDescriptorCopies);
6849    lock.unlock();
6850    if (!skip_call) {
6851        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6852                                                      pDescriptorCopies);
6853        lock.lock();
6854        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6855        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6856                                           pDescriptorCopies);
6857    }
6858}
6859
6860VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
6861                                                      VkCommandBuffer *pCommandBuffer) {
6862    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6863    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6864    if (VK_SUCCESS == result) {
6865        std::unique_lock<std::mutex> lock(global_lock);
6866        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6867
6868        if (pPool) {
6869            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6870                // Add command buffer to its commandPool map
6871                pPool->commandBuffers.push_back(pCommandBuffer[i]);
6872                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6873                // Add command buffer to map
6874                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6875                resetCB(dev_data, pCommandBuffer[i]);
6876                pCB->createInfo = *pCreateInfo;
6877                pCB->device = device;
6878            }
6879        }
6880        lock.unlock();
6881    }
6882    return result;
6883}
6884
6885// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
6886static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
6887    addCommandBufferBinding(&fb_state->cb_bindings,
6888                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
6889                            cb_state);
6890    for (auto attachment : fb_state->attachments) {
6891        auto view_state = attachment.view_state;
6892        if (view_state) {
6893            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
6894        }
6895    }
6896    // The render pass binding is per-framebuffer, so add it once rather than once per attachment
6897    auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
6898    if (rp_state) {
6899        addCommandBufferBinding(&rp_state->cb_bindings,
6900                                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
6901    }
6902}
6903
6904VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6905    bool skip_call = false;
6906    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6907    std::unique_lock<std::mutex> lock(global_lock);
6908    // Validate command buffer level
6909    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
6910    if (cb_node) {
6911        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6912        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6913            skip_call |=
6914                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6915                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00104, "MEM",
6916                        "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
6917                        "You must check command buffer fence before this call. %s",
6918                        commandBuffer, validation_error_map[VALIDATION_ERROR_00104]);
6919        }
6920        clear_cmd_buf_and_mem_references(dev_data, cb_node);
6921        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6922            // Secondary Command Buffer
6923            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6924            if (!pInfo) {
6925                skip_call |=
6926                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6927                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
6928                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
6929                            validation_error_map[VALIDATION_ERROR_00106]);
6930            } else {
6931                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6932                    // Object_tracker makes sure these objects are valid
6933                    assert(pInfo->renderPass);
6934                    assert(pInfo->framebuffer);
6935                    string errorString = "";
6936                    auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
6937                    if (framebuffer) {
6938                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
6939                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
6940                                                             getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
6941                                                             errorString)) {
6942                            // renderPass that framebuffer was created with must be compatible with local renderPass
6943                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6944                                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6945                                                 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
6946                                                 "vkBeginCommandBuffer(): Secondary Command "
6947                                                 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
6948                                                 ") is incompatible w/ framebuffer "
6949                                                 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
6950                                                 commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
6951                                                 reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
6952                                                 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass),
6953                                                 errorString.c_str(), validation_error_map[VALIDATION_ERROR_00112]);
6954                        }
6955                        // Connect this framebuffer and its children to this cmdBuffer
6956                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
6957                    }
6958                }
6959                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
6960                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6961                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6962                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6963                                         __LINE__, VALIDATION_ERROR_00107, "DS",
6964                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6965                                         "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
6966                                         "support precise occlusion queries. %s",
6967                                         commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
6968                }
6969            }
6970            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6971                auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
6972                if (renderPass) {
6973                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
6974                        skip_call |= log_msg(
6975                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6976                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00111, "DS",
6977                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
6978                            "that is less than the number of subpasses (%d). %s",
6979                            commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
6980                            validation_error_map[VALIDATION_ERROR_00111]);
6981                    }
6982                }
6983            }
6984        }
6985        if (CB_RECORDING == cb_node->state) {
6986            skip_call |=
6987                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6988                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
6989                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
6990                        ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
6991                        commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
6992        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
6993            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
6994            auto pPool = getCommandPoolNode(dev_data, cmdPool);
6995            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6996                skip_call |=
6997                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6998                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
6999                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
7000                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
7001                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
7002                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
7003            }
7004            resetCB(dev_data, commandBuffer);
7005        }
7006        // Set updated state here in case implicit reset occurs above
7007        cb_node->state = CB_RECORDING;
7008        cb_node->beginInfo = *pBeginInfo;
7009        if (cb_node->beginInfo.pInheritanceInfo) {
7010            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
7011            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
7012            // If this is a secondary command buffer that inherits state, update the items it should inherit.
7013            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7014                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7015                cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
7016                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
7017                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
7018                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
7019            }
7020        }
7021    }
7022    lock.unlock();
7023    if (skip_call) {
7024        return VK_ERROR_VALIDATION_FAILED_EXT;
7025    }
7026    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
7027
7028    return result;
7029}
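
// Illustrative sketch (not part of this layer): beginning a secondary command buffer with
// RENDER_PASS_CONTINUE, supplying the inheritance info that BeginCommandBuffer above
// validates (a compatible renderPass, an in-range subpass, and VK_QUERY_CONTROL_PRECISE_BIT
// only when precise occlusion queries are enabled and supported).  Handles are hypothetical.
#if 0  // documentation-only example, excluded from the build
VkCommandBufferInheritanceInfo inheritance = {};
inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
inheritance.renderPass = render_pass;   // must be compatible with the framebuffer's render pass
inheritance.subpass = 0;                // must be < the render pass's subpassCount
inheritance.framebuffer = framebuffer;  // optional, but enables additional validation and bindings
inheritance.occlusionQueryEnable = VK_FALSE;
inheritance.queryFlags = 0;             // PRECISE_BIT here would trip VALIDATION_ERROR_00107 above

VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
begin_info.pInheritanceInfo = &inheritance;
vkBeginCommandBuffer(secondary_command_buffer, &begin_info);
#endif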
7030
7031VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
7032    bool skip_call = false;
7033    VkResult result = VK_SUCCESS;
7034    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7035    std::unique_lock<std::mutex> lock(global_lock);
7036    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7037    if (pCB) {
7038        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
7039            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7040            // This needs spec clarification to update valid usage, see comments in PR:
7041            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
7042            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
7043        }
7044        skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
7045        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END);
7046        for (auto query : pCB->activeQueries) {
7047            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7048                                 VALIDATION_ERROR_00124, "DS",
7049                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d. %s",
7050                                 (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
7051        }
7052    }
7053    if (!skip_call) {
7054        lock.unlock();
7055        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
7056        lock.lock();
7057        if (VK_SUCCESS == result) {
7058            pCB->state = CB_RECORDED;
7059            // Reset CB status flags
7060            pCB->status = 0;
7061        }
7062    } else {
7063        result = VK_ERROR_VALIDATION_FAILED_EXT;
7064    }
7065    lock.unlock();
7066    return result;
7067}
7068
7069VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7070    bool skip_call = false;
7071    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7072    std::unique_lock<std::mutex> lock(global_lock);
7073    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7074    VkCommandPool cmdPool = pCB->createInfo.commandPool;
7075    auto pPool = getCommandPoolNode(dev_data, cmdPool);
7076    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7077        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7078                             (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
7079                             "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
7080                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
7081                             commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
7082    }
7083    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
7084    lock.unlock();
7085    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
7086    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
7087    if (VK_SUCCESS == result) {
7088        lock.lock();
7089        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
7090        resetCB(dev_data, commandBuffer);
7091        lock.unlock();
7092    }
7093    return result;
7094}
7095
7096VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
7097                                           VkPipeline pipeline) {
7098    bool skip = false;
7099    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7100    std::unique_lock<std::mutex> lock(global_lock);
7101    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
7102    if (cb_state) {
7103        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7104        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE);
7105        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
7106            skip |=
7107                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7108                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7109                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
7110                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
7111        }
7112        // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596
7113
7114        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
7115        if (pipe_state) {
7116            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
7117            set_cb_pso_status(cb_state, pipe_state);
7118            set_pipeline_state(pipe_state);
7119            addCommandBufferBinding(&pipe_state->cb_bindings,
7120                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
7121            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
7122                // Add binding for child renderpass
7123                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
7124                if (rp_state) {
7125                    addCommandBufferBinding(
7126                        &rp_state->cb_bindings,
7127                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
7128                }
7129            }
7130        } else {
7131            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7132                            (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
7133                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
7134                            validation_error_map[VALIDATION_ERROR_00600]);
7135        }
7136    }
7137    lock.unlock();
7138    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7139}
7140
VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
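        // Illustrative example (comment only, not validation logic): with firstViewport = 1 and
        // viewportCount = 2, ((1u << 2) - 1u) << 1 == 0b0110, recording viewports 1 and 2 as set
        // for later draw-time dynamic-state checks.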
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
                                 "flag. This is undefined behavior and the new line width may be ignored. %s",
                                 validation_error_map[VALIDATION_ERROR_01476]);
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE);
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            auto pipeline_layout = getPipelineLayout(dev_data, layout);
            for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
                cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[set_idx]);
                if (descriptor_set) {
                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                         __LINE__, DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[set_idx], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                             __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "Descriptor Set 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[set_idx]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, set_idx + firstSet,
                                                         errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                             __LINE__, VALIDATION_ERROR_00974, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
                                             set_idx, set_idx + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str(),
                                             validation_error_map[VALIDATION_ERROR_00974]);
                    }

                    auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                        __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        set_idx, (uint64_t)pDescriptorSets[set_idx], descriptor_set->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else {  // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
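                            // Illustrative example: if minUniformBufferOffsetAlignment is 256,
                            // each dynamic uniform-buffer offset must be 0, 256, 512, etc.; an
                            // offset such as 100 would trigger VALIDATION_ERROR_00978 below.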
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                                if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
                                         __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                         "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[set_idx]);
                }
                skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) {  // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
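            // Illustrative example: binding set 0 with one UNIFORM_BUFFER_DYNAMIC descriptor and
            // set 1 with two STORAGE_BUFFER_DYNAMIC descriptors requires dynamicOffsetCount == 3,
            // with pDynamicOffsets[0] consumed by set 0 and pDynamicOffsets[1..2] by set 1.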
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors. %s",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buffer_state = getBufferState(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
        VkDeviceSize offset_align = 0;
        switch (indexType) {
            case VK_INDEX_TYPE_UINT16:
                offset_align = 2;
                break;
            case VK_INDEX_TYPE_UINT32:
                offset_align = 4;
                break;
            default:
                // ParamChecker should catch a bad enum; we also raise the alignment error below if offset_align stays 0
                break;
        }
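        // Illustrative example: VK_INDEX_TYPE_UINT16 requires 2-byte alignment, so an offset of 7
        // fails the check below while an offset of 8 passes.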
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        // Param_checker will flag errors on invalid objects; just assert here as a debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buffer_state = getBufferState(dev_data, pBuffers[i]);
            assert(buffer_state);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

// Expects global_lock to be held by caller
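// The lambdas queued on validate_functions here are deferred checks: they are expected to run
// when the command buffer is later submitted, marking storage images and buffers written by this
// command buffer as having valid memory contents.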
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state) continue;

        auto image_state = getImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = getBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
}

// Generic function to handle validation for all CmdDraw* type functions
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller,
                                UNIQUE_VALIDATION_ERROR_CODE msg_code, UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
    bool skip = false;
    *cb_state = getCBNode(dev_data, cmd_buffer);
    if (*cb_state) {
        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
    }
    return skip;
}

// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                           CMD_TYPE cmd_type) {
    UpdateDrawState(dev_data, cb_state, bind_point);
    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
    UpdateCmdBufferLastCmd(dev_data, cb_state, cmd_type);
}

// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                   CMD_TYPE cmd_type, DRAW_TYPE draw_type) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
    updateResourceTrackingOnDraw(cb_state);
    cb_state->drawCount[draw_type]++;
}

static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VALIDATION_ERROR_01365,
                               VALIDATION_ERROR_02203);
}

static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW, DRAW);
}

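// The CmdDraw*/CmdDispatch* entry points below share a common shape: validate under the global
// lock, release the lock to call down the dispatch table, then re-acquire it to record post-call
// state for the command buffer.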
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
        lock.lock();
        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VALIDATION_ERROR_01372,
                               VALIDATION_ERROR_02216);
}

static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED, DRAW_INDEXED);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                              "vkCmdDrawIndexed()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
        lock.lock();
        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                           const char *caller) {
    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
                                    VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
    *buffer_state = getBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
    return skip;
}

static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                          BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT, DRAW_INDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                           uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                               &buffer_state, "vkCmdDrawIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                                  BUFFER_STATE **buffer_state, const char *caller) {
    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
                                    VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
    *buffer_state = getBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
    return skip;
}

static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                                 BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT, DRAW_INDEXED_INDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                  uint32_t count, uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VALIDATION_ERROR_01562,
                               VALIDATION_ERROR_UNDEFINED);
}

static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip =
        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
        lock.lock();
        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                               BUFFER_STATE **buffer_state, const char *caller) {
    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller,
                                    VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
    *buffer_state = getBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
    return skip;
}

static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                              BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
        lock.lock();
        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_buff_state && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02531);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02532);
        // Update bindings between buffers and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
    } else {
        // Param_checker will flag errors on invalid objects; just assert here as a debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
    bool result = true;
    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
        (extent->depth != other_extent->depth)) {
        result = false;
    }
    return result;
}

// Returns the image extent of a specific subresource.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
    const uint32_t mip = subresource->mipLevel;
    VkExtent3D extent = img->createInfo.extent;
    extent.width = std::max(1U, extent.width >> mip);
    extent.height = std::max(1U, extent.height >> mip);
    extent.depth = std::max(1U, extent.depth >> mip);
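    // Illustrative example: a 16x16x1 image at mipLevel 2 yields (4, 4, 1); the std::max(1U, ...)
    // clamp keeps each dimension at least 1 at high mip levels.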
    return extent;
}

// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentZero(const VkExtent3D *extent) {
    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}

// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
    VkExtent3D granularity = {0, 0, 0};
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (vk_format_is_compressed(img->createInfo.format)) {
            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
            granularity.width *= block_size.width;
            granularity.height *= block_size.height;
        }
    }
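    // Illustrative example: a queue family granularity of (1, 1, 1) combined with a 4x4-block
    // compressed format (e.g. BC1) scales to (4, 4, 1), so copies must then align to 4-texel
    // boundaries in width and height.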
    return granularity;
}

// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
    bool valid = true;
    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
        valid = false;
    }
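    // Illustrative example: with granularity (8, 8, 1), an extent of (16, 8, 1) is aligned,
    // while (4, 8, 1) is not, because 4 % 8 != 0.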
    return valid;
}

// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    VkExtent3D offset_extent = {};
    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (IsExtentZero(&offset_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, offset->x, offset->y, offset->z);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
        // integer multiples of the image transfer granularity.
        if (IsExtentAligned(&offset_extent, granularity) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
        }
    }
    return skip;
}

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
                                  const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
        // subresource extent.
        if (IsExtentEqual(extent, subresource_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
                            subresource_extent->height, subresource_extent->depth);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
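        // Illustrative example: with granularity (4, 4, 1) and a 30x30x1 subresource, copying
        // extent (26, 26, 1) at offset (4, 4, 0) is legal: 26 is not a multiple of 4, but
        // offset + extent reaches (30, 30, 1), the subresource edge.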
        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
        }
    }
    return skip;
}

// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
                        "transfer granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%" PRIdLEAST64
                        ") must be an even integer multiple of this command buffer's queue family image transfer "
                        "granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

7962// Check valid usage Image Tranfer Granularity requirements for elements of a VkImageCopy structure
7963static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7964                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
7965                                                                    const uint32_t i, const char *function) {
7966    bool skip = false;
7967    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
7968    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
7969    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
7970    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
7971    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
7972                           "extent");
7973    return skip;
7974}
7975
7976// Check valid usage Image Tranfer Granularity requirements for elements of a VkBufferImageCopy structure
7977static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7978                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
7979                                                                          const uint32_t i, const char *function) {
7980    bool skip = false;
7981    if (vk_format_is_compressed(img->createInfo.format) == true) {
7982        // TODO: Add granularity checking for compressed formats
7983
7984        // bufferRowLength must be a multiple of the compressed texel block width
7985        // bufferImageHeight must be a multiple of the compressed texel block height
7986        // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
7987        // bufferOffset must be a multiple of the compressed texel block size in bytes
7988        // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
7989        //     must equal the image subresource width
7990        // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
7991        //     must equal the image subresource height
7992        // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
7993        //     must equal the image subresource depth
7994    } else {
7995        VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
7996        skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
7997        skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
7998        skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.height, i, function, "bufferImageHeight");
7999        skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8000        VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8001        skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8002                               function, "imageExtent");
8003    }
8004    return skip;
8005}
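
// A minimal hedged sketch (not wired into any validation path) of the compressed-format
// granularity checks listed in the TODO inside the function above. The helpers
// GetCompressedTexelBlockExtent() and GetCompressedTexelBlockSize() are assumed here for
// illustration -- this layer does not currently provide them -- and a real implementation
// would report each failure through log_msg() instead of returning a plain bool.
static inline bool SketchCompressedCopyGranularityChecks(const IMAGE_STATE *img, const VkBufferImageCopy *region) {
    const VkExtent3D block = GetCompressedTexelBlockExtent(img->createInfo.format);       // assumed helper
    const VkDeviceSize block_size = GetCompressedTexelBlockSize(img->createInfo.format);  // assumed helper
    const VkExtent3D sub = GetImageSubresourceExtent(img, &region->imageSubresource);
    bool ok = true;
    // bufferRowLength and bufferImageHeight must be multiples of the block width and height
    ok &= (vk_safe_modulo(region->bufferRowLength, block.width) == 0);
    ok &= (vk_safe_modulo(region->bufferImageHeight, block.height) == 0);
    // all members of imageOffset must be multiples of the corresponding block dimension
    ok &= (vk_safe_modulo(static_cast<uint32_t>(region->imageOffset.x), block.width) == 0);
    ok &= (vk_safe_modulo(static_cast<uint32_t>(region->imageOffset.y), block.height) == 0);
    ok &= (vk_safe_modulo(static_cast<uint32_t>(region->imageOffset.z), block.depth) == 0);
    // bufferOffset must be a multiple of the compressed texel block size in bytes
    ok &= (vk_safe_modulo(region->bufferOffset, block_size) == 0);
    // each imageExtent dimension must be block-aligned unless the copy reaches the subresource edge
    ok &= (vk_safe_modulo(region->imageExtent.width, block.width) == 0 ||
           region->imageExtent.width + static_cast<uint32_t>(region->imageOffset.x) == sub.width);
    ok &= (vk_safe_modulo(region->imageExtent.height, block.height) == 0 ||
           region->imageExtent.height + static_cast<uint32_t>(region->imageOffset.y) == sub.height);
    ok &= (vk_safe_modulo(region->imageExtent.depth, block.depth) == 0 ||
           region->imageExtent.depth + static_cast<uint32_t>(region->imageOffset.z) == sub.depth);
    return ok;
}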
8006
8007VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8008                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
8009                                        const VkImageCopy *pRegions) {
8010    bool skip_call = false;
8011    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8012    std::unique_lock<std::mutex> lock(global_lock);
8013
8014    auto cb_node = getCBNode(dev_data, commandBuffer);
8015    auto src_image_state = getImageState(dev_data, srcImage);
8016    auto dst_image_state = getImageState(dev_data, dstImage);
8017    if (cb_node && src_image_state && dst_image_state) {
8018
8019        skip_call = PreCallValidateCmdCopyImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
8020
8021        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02533);
8022        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02534);
8023        // Update bindings between images and cmd buffer
8024        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8025        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8026        // Validate that SRC & DST images have correct usage flags set
8027        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8028                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8029        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8030                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8031        std::function<bool()> function = [=]() {
8032            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
8033        };
8034        cb_node->validate_functions.push_back(function);
8035        function = [=]() {
8036            SetImageMemoryValid(dev_data, dst_image_state, true);
8037            return false;
8038        };
8039        cb_node->validate_functions.push_back(function);
8040
8041        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8042        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
8043        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
8044        for (uint32_t i = 0; i < regionCount; ++i) {
8045            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout,
8046                                                 VALIDATION_ERROR_01180);
8047            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout,
8048                                               VALIDATION_ERROR_01183);
8049            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8050                                                                          "vkCmdCopyImage()");
8051        }
8052    } else {
8053        assert(0);
8054    }
8055    lock.unlock();
8056    if (!skip_call)
8057        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8058                                              pRegions);
8059}
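
// Application-side sketch (illustrative only, assuming a hypothetical 8x8x1 queue family
// image transfer granularity): a VkImageCopy whose offsets are granularity-aligned and
// whose extent is either granularity-aligned or reaches the subresource edge, so the
// per-region checks performed by CmdCopyImage() above would pass.
static inline VkImageCopy ExampleGranularityAlignedImageCopy() {
    VkImageCopy region = {};
    region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};  // aspectMask, mipLevel, baseArrayLayer, layerCount
    region.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    region.srcOffset = {0, 0, 0};  // offsets are integer multiples of the assumed 8x8x1 granularity
    region.dstOffset = {8, 8, 0};
    region.extent = {64, 64, 1};   // width/height are multiples of the granularity; depth 1 for a 2D image
    return region;
}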
8060
8061// Validate that an image's sampleCount matches the requirement for a specific API call
8062bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
8063                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
8064    bool skip = false;
8065    if (image_state->createInfo.samples != sample_count) {
8066        skip =
8067            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8068                    reinterpret_cast<uint64_t &>(image_state->image), 0, msgCode, "DS",
8069                    "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
8070                    reinterpret_cast<uint64_t &>(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
8071                    string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
8072    }
8073    return skip;
8074}
8075
8076VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8077                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
8078                                        const VkImageBlit *pRegions, VkFilter filter) {
8079    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8080    std::unique_lock<std::mutex> lock(global_lock);
8081
8082    auto cb_node = getCBNode(dev_data, commandBuffer);
8083    auto src_image_state = getImageState(dev_data, srcImage);
8084    auto dst_image_state = getImageState(dev_data, dstImage);
8085
8086    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);
8087
8088    if (!skip) {
8089        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
8090        lock.unlock();
8091        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8092                                              pRegions, filter);
8093    }
8094}
8095
8096VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
8097                                                VkImageLayout dstImageLayout, uint32_t regionCount,
8098                                                const VkBufferImageCopy *pRegions) {
8099    bool skip_call = false;
8100    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8101    std::unique_lock<std::mutex> lock(global_lock);
8102
8103    auto cb_node = getCBNode(dev_data, commandBuffer);
8104    auto src_buff_state = getBufferState(dev_data, srcBuffer);
8105    auto dst_image_state = getImageState(dev_data, dstImage);
8106    if (cb_node && src_buff_state && dst_image_state) {
8107        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
8108                                              "vkCmdCopyBufferToImage(): dstImage", VALIDATION_ERROR_01232);
8109        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02535);
8110        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02536);
8111        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
8112        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8113        skip_call |=
8114            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
8115                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8116        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8117                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8118        std::function<bool()> function = [=]() {
8119            SetImageMemoryValid(dev_data, dst_image_state, true);
8120            return false;
8121        };
8122        cb_node->validate_functions.push_back(function);
8123        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
8124        cb_node->validate_functions.push_back(function);
8125
8126        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8127        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE);
8128        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242);
8129        for (uint32_t i = 0; i < regionCount; ++i) {
8130            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout,
8131                                               VALIDATION_ERROR_01234);
8132            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8133                                                                                "vkCmdCopyBufferToImage()");
8134        }
8135    } else {
8136        assert(0);
8137    }
8138    lock.unlock();
8139    if (!skip_call)
8140        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
8141}
8142
8143VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8144                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8145    bool skip_call = false;
8146    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8147    std::unique_lock<std::mutex> lock(global_lock);
8148
8149    auto cb_node = getCBNode(dev_data, commandBuffer);
8150    auto src_image_state = getImageState(dev_data, srcImage);
8151    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8152    if (cb_node && src_image_state && dst_buff_state) {
8153        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT,
8154                                              "vkCmdCopyImageToBuffer(): srcImage", VALIDATION_ERROR_01249);
8155        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02537);
8156        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02538);
8157        // Update bindings between buffer/image and cmd buffer
8158        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8159        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8160        // Validate that SRC image & DST buffer have correct usage flags set
8161        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8162                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8163        skip_call |=
8164            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
8165                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8166        std::function<bool()> function = [=]() {
8167            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8168        };
8169        cb_node->validate_functions.push_back(function);
8170        function = [=]() {
8171            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8172            return false;
8173        };
8174        cb_node->validate_functions.push_back(function);
8175
8176        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8177        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER);
8178        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260);
8179        for (uint32_t i = 0; i < regionCount; ++i) {
8180            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout,
8181                                                 VALIDATION_ERROR_01251);
8182            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i,
8183                                                                                "vkCmdCopyImageToBuffer()");
8184        }
8185    } else {
8186        assert(0);
8187    }
8188    lock.unlock();
8189    if (!skip_call)
8190        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8191}
8192
8193VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8194                                           VkDeviceSize dataSize, const uint32_t *pData) {
8195    bool skip_call = false;
8196    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8197    std::unique_lock<std::mutex> lock(global_lock);
8198
8199    auto cb_node = getCBNode(dev_data, commandBuffer);
8200    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8201    if (cb_node && dst_buff_state) {
8202        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
8203        // Update bindings between buffer and cmd buffer
8204        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8205        // Validate that DST buffer has correct usage flags set
8206        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8207                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8208        std::function<bool()> function = [=]() {
8209            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8210            return false;
8211        };
8212        cb_node->validate_functions.push_back(function);
8213
8214        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8215        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
8216        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
8217    } else {
8218        assert(0);
8219    }
8220    lock.unlock();
8221    if (!skip_call) dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8222}
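
// Application-side sketch (illustrative only): vkCmdUpdateBuffer() is meant for small,
// 4-byte-aligned inline updates (the spec caps dataSize at 65536 and requires dstOffset
// and dataSize to be multiples of 4), recorded outside a render pass on a buffer created
// with VK_BUFFER_USAGE_TRANSFER_DST_BIT -- the usage-flag and render-pass rules are the
// ones validated above.
static inline void ExampleSmallInlineBufferUpdate(VkCommandBuffer cb, VkBuffer dst_buffer) {
    const uint32_t payload[4] = {0u, 1u, 2u, 3u};  // 16 bytes: 4-byte aligned and well under 65536
    vkCmdUpdateBuffer(cb, dst_buffer, 0 /* dstOffset */, sizeof(payload), payload);
}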
8223
8224VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8225                                         VkDeviceSize size, uint32_t data) {
8226    bool skip_call = false;
8227    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8228    std::unique_lock<std::mutex> lock(global_lock);
8229
8230    auto cb_node = getCBNode(dev_data, commandBuffer);
8231    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8232    if (cb_node && dst_buff_state) {
8233        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()", VALIDATION_ERROR_02529);
8234        // Update bindings between buffer and cmd buffer
8235        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8236        // Validate that DST buffer has correct usage flags set
8237        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8238                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8239        std::function<bool()> function = [=]() {
8240            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8241            return false;
8242        };
8243        cb_node->validate_functions.push_back(function);
8244
8245        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8246        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
8247        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
8248    } else {
8249        assert(0);
8250    }
8251    lock.unlock();
8252    if (!skip_call) dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8253}
8254
8255VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8256                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
8257                                               const VkClearRect *pRects) {
8258    bool skip = false;
8259    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8260    {
8261        std::lock_guard<std::mutex> lock(global_lock);
8262        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8263    }
8264    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8265}
8266
8267VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8268                                              const VkClearColorValue *pColor, uint32_t rangeCount,
8269                                              const VkImageSubresourceRange *pRanges) {
8270    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8271    std::unique_lock<std::mutex> lock(global_lock);
8272
8273    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
8274    if (!skip) {
8275        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARCOLORIMAGE);
8276        lock.unlock();
8277        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8278    }
8279}
8280
8281VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8282                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8283                                                     const VkImageSubresourceRange *pRanges) {
8284    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8285    std::unique_lock<std::mutex> lock(global_lock);
8286
8287    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
8288    if (!skip) {
8289        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARDEPTHSTENCILIMAGE);
8290        lock.unlock();
8291        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8292    }
8293}
8294
8295VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
8296                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
8297                                           const VkImageResolve *pRegions) {
8298    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8299    std::unique_lock<std::mutex> lock(global_lock);
8300
8301    auto cb_node = getCBNode(dev_data, commandBuffer);
8302    auto src_image_state = getImageState(dev_data, srcImage);
8303    auto dst_image_state = getImageState(dev_data, dstImage);
8304
8305    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
8306
8307    if (!skip) {
8308        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
8309        lock.unlock();
8310        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8311                                                 pRegions);
8312    }
8313}
8314
8315VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
8316                                                     VkSubresourceLayout *pLayout) {
8317    bool skipCall = false;
8318    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8319    VkFormat format;
8320
8321    auto imageEntry = getImageState(device_data, image);
8322
8323    // Validate that image aspects match formats
8324    if (imageEntry) {
8325        format = imageEntry->createInfo.format;
8326        if (vk_format_is_color(format)) {
8327            if (pSubresource->aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
8328                std::stringstream ss;
8329                ss << "vkGetImageSubresourceLayout: For color formats, the aspectMask field of VkImageSubresource must be "
8330                    "VK_IMAGE_ASPECT_COLOR_BIT.";
8331                skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8332                    (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s. %s", ss.str().c_str(),
8333                    validation_error_map[VALIDATION_ERROR_00741]);
8334            }
8335        } else if (vk_format_is_depth_or_stencil(format)) {
8336            if ((pSubresource->aspectMask != VK_IMAGE_ASPECT_DEPTH_BIT) &&
8337                (pSubresource->aspectMask != VK_IMAGE_ASPECT_STENCIL_BIT)) {
8338                std::stringstream ss;
8339                ss << "vkGetImageSubresourceLayout: For depth/stencil formats, the aspectMask field of VkImageSubresource must "
8340                    "be either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT.";
8341                skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8342                    (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s. %s", ss.str().c_str(),
8343                    validation_error_map[VALIDATION_ERROR_00741]);
8344            }
8345        }
8346    }
8347
8348    if (!skipCall) {
8349        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
8350    }
8351}
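
// Application-side sketch (illustrative only): querying mip 0 / layer 0 of a linearly
// tiled color image. For color formats the aspectMask must be exactly
// VK_IMAGE_ASPECT_COLOR_BIT, which is the rule the validation above enforces.
static inline void ExampleQueryColorSubresourceLayout(VkDevice device, VkImage linear_color_image) {
    VkImageSubresource subresource = {};
    subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;  // exactly the color aspect for a color format
    subresource.mipLevel = 0;
    subresource.arrayLayer = 0;
    VkSubresourceLayout layout = {};
    vkGetImageSubresourceLayout(device, linear_color_image, &subresource, &layout);
    (void)layout;  // layout.offset and layout.rowPitch would be used to address mapped memory
}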
8352
8353bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8354    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8355    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8356    if (pCB) {
8357        pCB->eventToStageMap[event] = stageMask;
8358    }
8359    auto queue_data = dev_data->queueMap.find(queue);
8360    if (queue_data != dev_data->queueMap.end()) {
8361        queue_data->second.eventToStageMap[event] = stageMask;
8362    }
8363    return false;
8364}
8365
8366VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8367    bool skip_call = false;
8368    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8369    std::unique_lock<std::mutex> lock(global_lock);
8370    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8371    if (pCB) {
8372        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8373        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT);
8374        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
8375        skip_call |=
8376            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_00230, VALIDATION_ERROR_00231);
8377        auto event_state = getEventNode(dev_data, event);
8378        if (event_state) {
8379            addCommandBufferBinding(&event_state->cb_bindings,
8380                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8381            event_state->cb_bindings.insert(pCB);
8382        }
8383        pCB->events.push_back(event);
8384        if (!pCB->waitedEvents.count(event)) {
8385            pCB->writeEventsBeforeWait.push_back(event);
8386        }
8387        std::function<bool(VkQueue)> eventUpdate =
8388            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8389        pCB->eventUpdates.push_back(eventUpdate);
8390    }
8391    lock.unlock();
8392    if (!skip_call) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
8393}
8394
8395VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8396    bool skip_call = false;
8397    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8398    std::unique_lock<std::mutex> lock(global_lock);
8399    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8400    if (pCB) {
8401        skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8402        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT);
8403        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
8404        skip_call |=
8405            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_00240, VALIDATION_ERROR_00241);
8406        auto event_state = getEventNode(dev_data, event);
8407        if (event_state) {
8408            addCommandBufferBinding(&event_state->cb_bindings,
8409                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8410            event_state->cb_bindings.insert(pCB);
8411        }
8412        pCB->events.push_back(event);
8413        if (!pCB->waitedEvents.count(event)) {
8414            pCB->writeEventsBeforeWait.push_back(event);
8415        }
8416        // TODO : Add check for VALIDATION_ERROR_00226
8417        std::function<bool(VkQueue)> eventUpdate =
8418            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8419        pCB->eventUpdates.push_back(eventUpdate);
8420    }
8421    lock.unlock();
8422    if (!skip_call) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
8423}
8424
8425static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8426                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8427                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8428                             const VkImageMemoryBarrier *pImageMemBarriers) {
8429    bool skip = false;
8430    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(cmdBuffer), layer_data_map);
8431    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8432    if (pCB->activeRenderPass && memBarrierCount) {
8433        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8434            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8435                            DRAWSTATE_INVALID_BARRIER, "DS",
8436                            "%s: Barriers cannot be set during subpass %d "
8437                            "with no self dependency specified.",
8438                            funcName, pCB->activeSubpass);
8439        }
8440    }
8441    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8442        auto mem_barrier = &pImageMemBarriers[i];
8443        auto image_data = getImageState(dev_data, mem_barrier->image);
8444        if (image_data) {
8445            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8446            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8447            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8448                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8449                // be VK_QUEUE_FAMILY_IGNORED
8450                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8451                    skip |=
8452                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8453                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image Barrier for image 0x%" PRIx64
8454                                                                     " was created with sharingMode of "
8455                                                                     "VK_SHARING_MODE_CONCURRENT. Src and dst "
8456                                                                     "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8457                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8458                }
8459            } else {
8460                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8461                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8462                // or both be a valid queue family
8463                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8464                    (src_q_f_index != dst_q_f_index)) {
8465                    skip |=
8466                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8467                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64
8468                                                                     " was created with sharingMode "
8469                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8470                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8471                                                                     "must be.",
8472                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8473                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8474                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8475                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8476                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8477                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8478                                    "%s: Image 0x%" PRIx64
8479                                    " was created with sharingMode "
8480                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8481                                    " or dstQueueFamilyIndex %d is not less than the number of "
8482                                    "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
8483                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
8484                                    dev_data->phys_dev_properties.queue_family_properties.size());
8485                }
8486            }
8487        }
8488
8489        if (mem_barrier) {
8490            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
8491                skip |=
8492                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8493                skip |=
8494                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8495            }
8496            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8497                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8498                                DRAWSTATE_INVALID_BARRIER, "DS",
8499                                "%s: Image Layout cannot be transitioned to UNDEFINED or "
8500                                "PREINITIALIZED.",
8501                                funcName);
8502            }
8503            VkFormat format = VK_FORMAT_UNDEFINED;
8504            uint32_t arrayLayers = 0, mipLevels = 0;
8505            bool imageFound = false;
8506            if (image_data) {
8507                format = image_data->createInfo.format;
8508                arrayLayers = image_data->createInfo.arrayLayers;
8509                mipLevels = image_data->createInfo.mipLevels;
8510                imageFound = true;
8511            } else if (dev_data->device_extensions.wsi_enabled) {
8512                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8513                if (imageswap_data) {
8514                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8515                    if (swapchain_data) {
8516                        format = swapchain_data->createInfo.imageFormat;
8517                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8518                        mipLevels = 1;
8519                        imageFound = true;
8520                    }
8521                }
8522            }
8523            if (imageFound) {
8524                skip |= ValidateImageSubrangeLevelLayerCounts(dev_data, mem_barrier->subresourceRange, funcName);
8525                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8526                skip |= ValidateImageAspectMask(dev_data, image_data->image, format, aspect_mask, funcName);
8527                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8528                                     ? 1
8529                                     : mem_barrier->subresourceRange.layerCount;
8530                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8531                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8532                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8533                                    "%s: The sum of the subresource's "
8534                                    "baseArrayLayer (%d) and layerCount (%d) must be less "
8535                                    "than or equal to the total number of layers (%d).",
8536                                    funcName, mem_barrier->subresourceRange.baseArrayLayer,
8537                                    mem_barrier->subresourceRange.layerCount, arrayLayers);
8538                }
8539                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8540                                     ? 1
8541                                     : mem_barrier->subresourceRange.levelCount;
8542                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8543                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8544                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8545                                    "%s: The sum of the subresource's baseMipLevel "
8546                                    "(%d) and levelCount (%d) must be less than or equal to "
8547                                    "the total number of levels (%d).",
8548                                    funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
8549                                    mipLevels);
8550                }
8551            }
8552        }
8553    }
8554    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8555        auto mem_barrier = &pBufferMemBarriers[i];
8556        if (pCB->activeRenderPass) {
8557            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8558                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8559        }
8560        if (!mem_barrier) continue;
8561
8562        // Validate buffer barrier queue family indices
8563        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8564             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8565            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8566             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8567            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8568                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8569                            "%s: Buffer Barrier 0x%" PRIx64
8570                            " has QueueFamilyIndex greater "
8571                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8572                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8573                            dev_data->phys_dev_properties.queue_family_properties.size());
8574        }
8575
8576        auto buffer_state = getBufferState(dev_data, mem_barrier->buffer);
8577        if (buffer_state) {
8578            auto buffer_size = buffer_state->requirements.size;
8579            if (mem_barrier->offset >= buffer_size) {
8580                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8581                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
8582                                                                 " which is not less than total size 0x%" PRIx64 ".",
8583                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8584                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8585                                reinterpret_cast<const uint64_t &>(buffer_size));
8586            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8587                skip |= log_msg(
8588                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8589                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8590                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
8591                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8592                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8593                    reinterpret_cast<const uint64_t &>(buffer_size));
8594            }
8595        }
8596    }
8597    return skip;
8598}
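
// Application-side sketch (illustrative only): an image memory barrier that satisfies the
// rules checked in ValidateBarriers() above -- for a VK_SHARING_MODE_EXCLUSIVE image with
// no queue family ownership transfer both indices stay VK_QUEUE_FAMILY_IGNORED, the new
// layout is never UNDEFINED or PREINITIALIZED, and the subresource range stays within the
// image's mip levels and array layers.
static inline VkImageMemoryBarrier ExampleWellFormedImageBarrier(VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;          // consistent with oldLayout
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;             // consistent with newLayout
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;  // a valid (non-UNDEFINED) target layout
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;         // no ownership transfer
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};  // mip 0, layer 0 only
    return barrier;
}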
8599
8600bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
8601                            VkPipelineStageFlags sourceStageMask) {
8602    bool skip_call = false;
8603    VkPipelineStageFlags stageMask = 0;
8604    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8605    for (uint32_t i = 0; i < eventCount; ++i) {
8606        auto event = pCB->events[firstEventIndex + i];
8607        auto queue_data = dev_data->queueMap.find(queue);
8608        if (queue_data == dev_data->queueMap.end()) return false;
8609        auto event_data = queue_data->second.eventToStageMap.find(event);
8610        if (event_data != queue_data->second.eventToStageMap.end()) {
8611            stageMask |= event_data->second;
8612        } else {
8613            auto global_event_data = getEventNode(dev_data, event);
8614            if (!global_event_data) {
8615                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8616                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8617                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8618                                     reinterpret_cast<const uint64_t &>(event));
8619            } else {
8620                stageMask |= global_event_data->stageMask;
8621            }
8622        }
8623    }
8624    // TODO: Need to validate that host_bit is only set if set event is called
8625    // but set event can be called at any time.
8626    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8627        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8628                             VALIDATION_ERROR_00254, "DS",
8629                             "Submitting cmdbuffer with call to vkCmdWaitEvents "
8630                             "using srcStageMask 0x%X, which must be the bitwise "
8631                             "OR of the stageMask parameters used in calls to "
8632                             "vkCmdSetEvent (and VK_PIPELINE_STAGE_HOST_BIT if "
8633                             "used with vkSetEvent), but instead is 0x%X. %s",
8634                             sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
8635    }
8636    return skip_call;
8637}
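
// Application-side sketch (illustrative only): the sourceStageMask passed to
// vkCmdWaitEvents() must equal the bitwise OR of the stageMask values used in the
// matching vkCmdSetEvent() calls (optionally OR'd with VK_PIPELINE_STAGE_HOST_BIT for
// events set from the host via vkSetEvent()), which is what validateEventStageMask()
// above checks at submit time.
static inline void ExampleMatchedEventStageMasks(VkCommandBuffer cb, VkEvent event) {
    const VkPipelineStageFlags set_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    vkCmdSetEvent(cb, event, set_stage);
    // ... other recorded work ...
    vkCmdWaitEvents(cb, 1, &event,
                    set_stage,                              // must be the OR of all set-event stage masks
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,  // dstStageMask
                    0, nullptr, 0, nullptr, 0, nullptr);    // no memory barriers in this sketch
}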
8638
8639// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
8640static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
8641    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8642    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8643    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8644    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8645    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8646    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8647    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8648    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8649    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8650    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8651    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8652    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
8653    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
8654    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
8655
8656static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
8657                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
8658                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
8659                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
8660                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
8661                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
8662                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
8663                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
8664                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
8665                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
8666                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
8667                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
8668                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
8669                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
8670
8671bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
8672                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
8673                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
8674    bool skip = false;
8675    // Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
8676    for (const auto &item : stage_flag_bit_array) {
8677        if (stage_mask & item) {
8678            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
8679                skip |=
8680                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8681                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
8682                            "%s(): %s flag %s is not compatible with the queue family properties of this "
8683                            "command buffer. %s",
8684                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
8685                            validation_error_map[error_code]);
8686            }
8687        }
8688    }
8689    return skip;
8690}
8691
8692bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
8693                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
8694                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
8695    bool skip = false;
8696    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
8697    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
8698    auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device);
8699
8700    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
8701    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
8702    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
8703
8704    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
8705        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
8706
8707        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8708            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
8709                                                     function, "srcStageMask", error_code);
8710        }
8711        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8712            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
8713                                                     function, "dstStageMask", error_code);
8714        }
8715    }
8716    return skip;
8717}
8718
8719VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
8720                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
8721                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8722                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8723                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8724    bool skip = false;
8725    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8726    std::unique_lock<std::mutex> lock(global_lock);
8727    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
8728    if (cb_state) {
8729        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
8730                                                           VALIDATION_ERROR_02510);
8731        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02067,
8732                                             VALIDATION_ERROR_02069);
8733        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02068,
8734                                             VALIDATION_ERROR_02070);
8735        auto first_event_index = cb_state->events.size();
8736        for (uint32_t i = 0; i < eventCount; ++i) {
8737            auto event_state = getEventNode(dev_data, pEvents[i]);
8738            if (event_state) {
8739                addCommandBufferBinding(&event_state->cb_bindings,
8740                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
8741                                        cb_state);
8742                event_state->cb_bindings.insert(cb_state);
8743            }
8744            cb_state->waitedEvents.insert(pEvents[i]);
8745            cb_state->events.push_back(pEvents[i]);
8746        }
8747        std::function<bool(VkQueue)> event_update =
8748            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
8749        cb_state->eventUpdates.push_back(event_update);
8750        if (cb_state->state == CB_RECORDING) {
8751            skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8752            UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS);
8753        } else {
8754            skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8755        }
8756        skip |= TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8757        skip |= ValidateBarriers("vkCmdWaitEvents()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8758                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8759    }
8760    lock.unlock();
8761    if (!skip)
8762        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8763                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8764                                               imageMemoryBarrierCount, pImageMemoryBarriers);
8765}
8766
8767VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8768                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8769                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8770                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8771                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8772    bool skip = false;
8773    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8774    std::unique_lock<std::mutex> lock(global_lock);
8775    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
8776    if (cb_state) {
8777        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
8778                                                           VALIDATION_ERROR_02513);
8779        skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8780        skip |= ValidateStageMaskGsTsEnables(dev_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00265,
8781                                             VALIDATION_ERROR_00267);
8782        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00266,
8783                                             VALIDATION_ERROR_00268);
8784        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER);
8785        skip |= TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8786        skip |= ValidateBarriers("vkCmdPipelineBarrier()", commandBuffer, memoryBarrierCount, pMemoryBarriers,
8787                                 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8788    }
8789    lock.unlock();
8790    if (!skip)
8791        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
8792                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8793                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
8794}
8795
8796bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8797    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8798    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8799    if (pCB) {
8800        pCB->queryToStateMap[object] = value;
8801    }
8802    auto queue_data = dev_data->queueMap.find(queue);
8803    if (queue_data != dev_data->queueMap.end()) {
8804        queue_data->second.queryToStateMap[object] = value;
8805    }
8806    return false;
8807}
8808
8809VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8810    bool skip_call = false;
8811    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8812    std::unique_lock<std::mutex> lock(global_lock);
8813    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8814    if (pCB) {
8815        QueryObject query = {queryPool, slot};
8816        pCB->activeQueries.insert(query);
8817        if (!pCB->startedQueries.count(query)) {
8818            pCB->startedQueries.insert(query);
8819        }
8820        skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8821        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY);
8822        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8823                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8824    }
8825    lock.unlock();
8826    if (!skip_call) dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8827}
8828
8829VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8830    bool skip_call = false;
8831    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8832    std::unique_lock<std::mutex> lock(global_lock);
8833    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8834    if (pCB) {
8835        QueryObject query = {queryPool, slot};
8836        if (!pCB->activeQueries.count(query)) {
8837            skip_call |=
8838                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8839                        VALIDATION_ERROR_01041, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
8840                        (uint64_t)(queryPool), slot, validation_error_map[VALIDATION_ERROR_01041]);
8841        } else {
8842            pCB->activeQueries.erase(query);
8843        }
8844        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8845        pCB->queryUpdates.push_back(queryUpdate);
8846        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8848            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY);
8849        } else {
8850            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8851        }
8852        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8853                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8854    }
8855    lock.unlock();
8856    if (!skip_call) dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
8857}
8858
8859VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8860                                             uint32_t queryCount) {
8861    bool skip_call = false;
8862    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8863    std::unique_lock<std::mutex> lock(global_lock);
8864    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8865    if (pCB) {
8866        for (uint32_t i = 0; i < queryCount; i++) {
8867            QueryObject query = {queryPool, firstQuery + i};
8868            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8869            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
8870            pCB->queryUpdates.push_back(queryUpdate);
8871        }
8872        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8874            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
8875        } else {
8876            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8877        }
8878        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
8879        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8880                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8881    }
8882    lock.unlock();
8883    if (!skip_call) dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8884}
8885
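// Submit-time callback: verify that every query in [firstQuery, firstQuery + queryCount)
// has results available, checking the queue-local query state first and falling back to the
// device-global state. Reports DRAWSTATE_INVALID_QUERY for each unavailable query.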
8886bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
8887    bool skip_call = false;
8888    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8889    auto queue_data = dev_data->queueMap.find(queue);
8890    if (queue_data == dev_data->queueMap.end()) return false;
8891    for (uint32_t i = 0; i < queryCount; i++) {
8892        QueryObject query = {queryPool, firstQuery + i};
8893        auto query_data = queue_data->second.queryToStateMap.find(query);
8894        bool fail = false;
8895        if (query_data != queue_data->second.queryToStateMap.end()) {
8896            if (!query_data->second) {
8897                fail = true;
8898            }
8899        } else {
8900            auto global_query_data = dev_data->queryToStateMap.find(query);
8901            if (global_query_data != dev_data->queryToStateMap.end()) {
8902                if (!global_query_data->second) {
8903                    fail = true;
8904                }
8905            } else {
8906                fail = true;
8907            }
8908        }
8909        if (fail) {
8910            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8911                                 DRAWSTATE_INVALID_QUERY, "DS",
8912                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8913                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8914        }
8915    }
8916    return skip_call;
8917}
8918
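// vkCmdCopyQueryPoolResults: beyond the usual buffer binding/usage checks, queue a deferred
// validateQuery callback so that query availability is checked at submit time, when the
// queue's query state is actually known.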
8919VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8920                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8921                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
8922    bool skip_call = false;
8923    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8924    std::unique_lock<std::mutex> lock(global_lock);
8925
8926    auto cb_node = getCBNode(dev_data, commandBuffer);
8927    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8928    if (cb_node && dst_buff_state) {
8929        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
8930        // Update bindings between buffer and cmd buffer
8931        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8932        // Validate that DST buffer has correct usage flags set
8933        skip_call |=
8934            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
8935                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8936        std::function<bool()> function = [=]() {
8937            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8938            return false;
8939        };
8940        cb_node->validate_functions.push_back(function);
8941        std::function<bool(VkQueue)> queryUpdate =
8942            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
8943        cb_node->queryUpdates.push_back(queryUpdate);
8944        if (cb_node->state == CB_RECORDING) {
8945            skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8946            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS);
8947        } else {
8948            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8949        }
8950        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
8951        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8952                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
8953    } else {
8954        assert(0);
8955    }
8956    lock.unlock();
8957    if (!skip_call)
8958        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
8959                                                         stride, flags);
8960}
8961
8962VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
8963                                            uint32_t offset, uint32_t size, const void *pValues) {
8964    bool skip_call = false;
8965    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8966    std::unique_lock<std::mutex> lock(global_lock);
8967    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8968    if (pCB) {
8969        if (pCB->state == CB_RECORDING) {
8970            skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8971            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS);
8972        } else {
8973            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8974        }
8975    }
8976    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8977    if (0 == stageFlags) {
8978        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8979                             VALIDATION_ERROR_00996, "DS", "vkCmdPushConstants() call has no stageFlags set. %s",
8980                             validation_error_map[VALIDATION_ERROR_00996]);
8981    }
8982
8983    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8984    auto pipeline_layout = getPipelineLayout(dev_data, layout);
8985    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8986    // contained in the pipeline ranges.
8987    // Build a {start, end} span list for ranges with matching stage flags.
8988    const auto &ranges = pipeline_layout->push_constant_ranges;
8989    struct span {
8990        uint32_t start;
8991        uint32_t end;
8992    };
8993    std::vector<span> spans;
8994    spans.reserve(ranges.size());
8995    for (const auto &iter : ranges) {
8996        if (iter.stageFlags == stageFlags) {
8997            spans.push_back({iter.offset, iter.offset + iter.size});
8998        }
8999    }
    if (spans.empty()) {
9001        // There were no ranges that matched the stageFlags.
9002        skip_call |=
9003            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9004                    VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32
9005                                                  " do not match "
9006                                                  "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ". %s",
9007                    (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
9008    } else {
9009        // Sort span list by start value.
9010        struct comparer {
9011            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9012        } my_comparer;
9013        std::sort(spans.begin(), spans.end(), my_comparer);
9014
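        // Worked example (hypothetical ranges): sorted spans {[0,16), [8,24), [32,40)} coalesce
        // to {[0,24), [32,40)}. An update with offset 4 and size 16 ([4,20)) is then contained
        // in [0,24), while offset 20 with size 16 ([20,36)) is not contained in any span.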
9015        // Examine two spans at a time.
9016        std::vector<span>::iterator current = spans.begin();
9017        std::vector<span>::iterator next = current + 1;
9018        while (next != spans.end()) {
9019            if (current->end < next->start) {
9020                // There is a gap; cannot coalesce. Move to the next two spans.
9021                ++current;
9022                ++next;
9023            } else {
9024                // Coalesce the two spans.  The start of the next span
9025                // is within the current span, so pick the larger of
9026                // the end values to extend the current span.
9027                // Then delete the next span and set next to the span after it.
9028                current->end = max(current->end, next->end);
9029                next = spans.erase(next);
9030            }
9031        }
9032
9033        // Now we can check if the incoming range is within any of the spans.
9034        bool contained_in_a_range = false;
9035        for (uint32_t i = 0; i < spans.size(); ++i) {
9036            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9037                contained_in_a_range = true;
9038                break;
9039            }
9040        }
9041        if (!contained_in_a_range) {
9042            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9043                                 VALIDATION_ERROR_00988, "DS",
9044                                 "vkCmdPushConstants() Push constant range [%d, %d) "
9045                                 "with stageFlags = 0x%" PRIx32
9046                                 " "
9047                                 "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ". %s",
9048                                 offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout,
9049                                 validation_error_map[VALIDATION_ERROR_00988]);
9050        }
9051    }
9052    lock.unlock();
9053    if (!skip_call) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9054}
9055
9056VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
9057                                             VkQueryPool queryPool, uint32_t slot) {
9058    bool skip_call = false;
9059    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9060    std::unique_lock<std::mutex> lock(global_lock);
9061    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9062    if (pCB) {
9063        QueryObject query = {queryPool, slot};
9064        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9065        pCB->queryUpdates.push_back(queryUpdate);
9066        if (pCB->state == CB_RECORDING) {
9067            skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9068            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
9069        } else {
9070            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9071        }
9072    }
9073    lock.unlock();
9074    if (!skip_call) dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9075}
9076
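// Check that every attachment referenced in <attachments> was created with <usage_flag> set
// in its image's VkImageCreateInfo::usage. Used below to verify that color, input, and
// depth/stencil framebuffer attachments carry the usage bits their render pass requires.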
9077static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9078                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
9079                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
9080    bool skip_call = false;
9081
9082    for (uint32_t attach = 0; attach < count; attach++) {
9083        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9084            // Attachment counts are verified elsewhere, but prevent an invalid access
9085            if (attachments[attach].attachment < fbci->attachmentCount) {
9086                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9087                auto view_state = getImageViewState(dev_data, *image_view);
9088                if (view_state) {
                    auto image_state = getImageState(dev_data, view_state->create_info.image);
                    // getImageState() may return null for an unknown image; check the state
                    // pointer before dereferencing it to reach createInfo.
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
9092                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9093                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, error_code, "DS",
9094                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9095                                                 "IMAGE_USAGE flags (%s). %s",
9096                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
9097                                                 validation_error_map[error_code]);
9098                        }
9099                    }
9100                }
9101            }
9102        }
9103    }
9104    return skip_call;
9105}
9106
9107// Validate VkFramebufferCreateInfo which includes:
9108// 1. attachmentCount equals renderPass attachmentCount
9109// 2. corresponding framebuffer and renderpass attachments have matching formats
9110// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9111// 4. fb attachments only have a single mip level
9112// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
9114// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9115// 8. fb dimensions are within physical device limits
9116static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9117    bool skip_call = false;
9118
9119    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
9120    if (rp_state) {
9121        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
9122        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9123            skip_call |= log_msg(
9124                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9125                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
9126                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9127                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
9128                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
9129                validation_error_map[VALIDATION_ERROR_00404]);
9130        } else {
9131            // attachmentCounts match, so make sure corresponding attachment details line up
9132            const VkImageView *image_views = pCreateInfo->pAttachments;
9133            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                if (!view_state) {
                    // getImageViewState() may return null for an unknown view; skip it rather
                    // than dereference a null pointer below.
                    continue;
                }
                auto &ivci = view_state->create_info;
9136                if (ivci.format != rpci->pAttachments[i].format) {
9137                    skip_call |= log_msg(
9138                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9139                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
9140                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9141                        "the format of "
9142                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
9143                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9144                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
9145                }
                auto image_state = getImageState(dev_data, ivci.image);
                if (!image_state) {
                    // Unknown image behind this view; skip the image-based checks below rather
                    // than dereference a null state pointer.
                    continue;
                }
                const VkImageCreateInfo *ici = &image_state->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
9148                    skip_call |= log_msg(
9149                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9150                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
9151                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9152                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
9153                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9154                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
9155                }
9156                // Verify that view only has a single mip level
9157                if (ivci.subresourceRange.levelCount != 1) {
9158                    skip_call |=
9159                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9160                                VALIDATION_ERROR_00411, "DS",
9161                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
9163                                i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
9164                }
9165                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9166                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9167                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9168                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9169                    (mip_height < pCreateInfo->height)) {
9170                    skip_call |=
9171                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9172                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9173                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9174                                "than the corresponding "
9175                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9176                                "dimensions for "
9177                                "attachment #%u, framebuffer:\n"
9178                                "width: %u, %u\n"
9179                                "height: %u, %u\n"
9180                                "layerCount: %u, %u\n",
9181                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9182                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9183                }
9184                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9185                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9186                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9187                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9188                    skip_call |= log_msg(
9189                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9190                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
9192                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9193                        "r swizzle = %s\n"
9194                        "g swizzle = %s\n"
9195                        "b swizzle = %s\n"
9196                        "a swizzle = %s\n"
9197                        "%s",
9198                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9199                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
9200                        validation_error_map[VALIDATION_ERROR_00412]);
9201                }
9202            }
9203        }
9204        // Verify correct attachment usage flags
9205        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9206            // Verify input attachments:
9207            skip_call |=
9208                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
9209                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
9210            // Verify color attachments:
9211            skip_call |=
9212                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
9213                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
9214            // Verify depth/stencil attachments:
9215            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9216                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9217                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
9218            }
9219        }
9220    }
9221    // Verify FB dimensions are within physical device limits
9222    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
9223        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9224                             VALIDATION_ERROR_00413, "DS",
9225                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
9226                             "Requested width: %u, device max: %u\n"
9227                             "%s",
9228                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9229                             validation_error_map[VALIDATION_ERROR_00413]);
9230    }
9231    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
9232        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9233                             VALIDATION_ERROR_00414, "DS",
9234                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
9235                             "Requested height: %u, device max: %u\n"
9236                             "%s",
9237                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9238                             validation_error_map[VALIDATION_ERROR_00414]);
9239    }
9240    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
9241        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9242                             VALIDATION_ERROR_00415, "DS",
9243                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
9244                             "Requested layers: %u, device max: %u\n"
9245                             "%s",
9246                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
9247                             validation_error_map[VALIDATION_ERROR_00415]);
9248    }
9249    return skip_call;
9250}
9251
// Validate VkFramebufferCreateInfo state prior to calling down the chain to create the Framebuffer object.
//  Returns true if an error is encountered and the callback requests that the call down the chain be skipped;
//  false indicates that the call down the chain should proceed.
9255static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9256    // TODO : Verify that renderPass FB is created with is compatible with FB
9257    bool skip_call = false;
9258    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9259    return skip_call;
9260}
9261
9262// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9263static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9264    // Shadow create info and store in map
9265    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
9266        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
9267
9268    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9269        VkImageView view = pCreateInfo->pAttachments[i];
9270        auto view_state = getImageViewState(dev_data, view);
9271        if (!view_state) {
9272            continue;
9273        }
9274        MT_FB_ATTACHMENT_INFO fb_info;
9275        fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
9276        fb_info.view_state = view_state;
9277        fb_info.image = view_state->create_info.image;
9278        fb_state->attachments.push_back(fb_info);
9279    }
9280    dev_data->frameBufferMap[fb] = std::move(fb_state);
9281}
9282
9283VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9284                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
9285    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9286    std::unique_lock<std::mutex> lock(global_lock);
9287    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9288    lock.unlock();
9289
9290    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
9291
9292    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9293
9294    if (VK_SUCCESS == result) {
9295        lock.lock();
9296        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9297        lock.unlock();
9298    }
9299    return result;
9300}
9301
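// Depth-first search over the subpass DAG: returns true if a dependency path from <index>
// back to <dependent> exists, using <processed_nodes> to avoid revisiting subpasses.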
9302static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9303                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
9305    if (processed_nodes.count(index)) return false;
9306    processed_nodes.insert(index);
9307    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
9309    if (std::find(node.prev.begin(), node.prev.end(), static_cast<uint32_t>(dependent)) == node.prev.end()) {
9310        for (auto elem : node.prev) {
9311            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
9312        }
9313    } else {
9314        return true;
9315    }
9316    return false;
9317}
9318
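// For each subpass in <dependent_subpasses> that shares an attachment with <subpass>, verify
// that a direct or transitive dependency exists in the DAG. Logs an error and returns false
// when two such subpasses have no dependency relationship at all.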
9319static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9320                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9321    bool result = true;
9322    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9323    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9324        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
9325        const DAGNode &node = subpass_to_node[subpass];
9326        // Check for a specified dependency between the two nodes. If one exists we are done.
9327        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9328        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9329        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If neither does, report an error.
9331            std::unordered_set<uint32_t> processed_nodes;
9332            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9333                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9334                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9335                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9336                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9337                                     dependent_subpasses[k]);
9338                result = false;
9339            }
9340        }
9341    }
9342    return result;
9343}
9344
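// Recursively determine whether <attachment> is written by subpass <index> or any of its DAG
// ancestors. Intermediate subpasses (depth > 0) that sit between the writer and the original
// reader must list the attachment in pPreserveAttachments, or an error is logged.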
9345static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9346                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9347    const DAGNode &node = subpass_to_node[index];
9348    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9349    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9350    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9351        if (attachment == subpass.pColorAttachments[j].attachment) return true;
9352    }
9353    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9354        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
9355    }
9356    bool result = false;
9357    // Loop through previous nodes and see if any of them write to the attachment.
9358    for (auto elem : node.prev) {
9359        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9360    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
9362    if (result && depth > 0) {
9363        bool has_preserved = false;
9364        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9365            if (subpass.pPreserveAttachments[j] == attachment) {
9366                has_preserved = true;
9367                break;
9368            }
9369        }
9370        if (!has_preserved) {
9371            skip_call |=
9372                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9373                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9374                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9375        }
9376    }
9377    return result;
9378}
9379
9380template <class T>
9381bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two half-open ranges [offset, offset + size) overlap iff each one begins before the other ends.
    return ((offset1 + size1) > offset2) && ((offset2 + size2) > offset1);
9384}
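// Illustrative examples (hypothetical values): isRangeOverlapping(0u, 4u, 2u, 4u) is true,
// since [0,4) and [2,6) share [2,4); isRangeOverlapping(0u, 2u, 2u, 2u) is false, since
// [0,2) and [2,4) only touch at offset 2.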
9385
9386bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9387    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9388            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9389}
9390
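// Validate a framebuffer/render-pass pair: find attachments whose views alias the same image
// subresources or the same memory range, then make sure every read-after-write between
// subpasses is covered by a subpass dependency and that read attachments are preserved.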
9391static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
9392                                 RENDER_PASS_STATE const *renderPass) {
9393    bool skip_call = false;
9394    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
9395    auto const pCreateInfo = renderPass->createInfo.ptr();
9396    auto const &subpass_to_node = renderPass->subpassToNode;
9397    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9398    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9399    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9400    // Find overlapping attachments
9401    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9402        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9403            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9404            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9405            if (viewi == viewj) {
9406                overlapping_attachments[i].push_back(j);
9407                overlapping_attachments[j].push_back(i);
9408                continue;
9409            }
9410            auto view_state_i = getImageViewState(dev_data, viewi);
9411            auto view_state_j = getImageViewState(dev_data, viewj);
9412            if (!view_state_i || !view_state_j) {
9413                continue;
9414            }
9415            auto view_ci_i = view_state_i->create_info;
9416            auto view_ci_j = view_state_j->create_info;
9417            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9418                overlapping_attachments[i].push_back(j);
9419                overlapping_attachments[j].push_back(i);
9420                continue;
9421            }
9422            auto image_data_i = getImageState(dev_data, view_ci_i.image);
9423            auto image_data_j = getImageState(dev_data, view_ci_j.image);
9424            if (!image_data_i || !image_data_j) {
9425                continue;
9426            }
9427            if (image_data_i->binding.mem == image_data_j->binding.mem &&
9428                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
9429                                   image_data_j->binding.size)) {
9430                overlapping_attachments[i].push_back(j);
9431                overlapping_attachments[j].push_back(i);
9432            }
9433        }
9434    }
9435    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9436        uint32_t attachment = i;
9437        for (auto other_attachment : overlapping_attachments[i]) {
9438            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9439                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9440                                     __LINE__, VALIDATION_ERROR_00324, "DS",
9441                                     "Attachment %d aliases attachment %d but doesn't "
9442                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
9443                                     attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
9444            }
9445            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9446                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9447                                     __LINE__, VALIDATION_ERROR_00324, "DS",
9448                                     "Attachment %d aliases attachment %d but doesn't "
9449                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
9450                                     other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
9451            }
9452        }
9453    }
    // For each attachment, find the subpasses that use it.
9455    unordered_set<uint32_t> attachmentIndices;
9456    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9457        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9458        attachmentIndices.clear();
9459        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9460            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9461            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9462            input_attachment_to_subpass[attachment].push_back(i);
9463            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9464                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9465            }
9466        }
9467        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9468            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9469            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9470            output_attachment_to_subpass[attachment].push_back(i);
9471            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9472                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9473            }
9474            attachmentIndices.insert(attachment);
9475        }
9476        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9477            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9478            output_attachment_to_subpass[attachment].push_back(i);
9479            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9480                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9481            }
9482
9483            if (attachmentIndices.count(attachment)) {
9484                skip_call |=
9485                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9486                            DRAWSTATE_INVALID_RENDERPASS, "DS",
9487                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
9488            }
9489        }
9490    }
9491    // If there is a dependency needed make sure one exists
9492    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9493        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9494        // If the attachment is an input then all subpasses that output must have a dependency relationship
9495        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9496            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9497            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9498            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9499        }
9500        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9501        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9502            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9503            if (attachment == VK_ATTACHMENT_UNUSED) continue;
9504            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9505            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9506        }
9507        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9508            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9509            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9510            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9511        }
9512    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment
    // is preserved in every pass between the one that wrote it and this one.
9515    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9516        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9517        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9518            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9519        }
9520    }
9521    return skip_call;
9522}
9523
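// Build the subpass DAG from pCreateInfo->pDependencies: record prev/next edges between
// subpasses, flag self-dependencies, and reject dependencies that run backwards or have
// both endpoints set to VK_SUBPASS_EXTERNAL.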
9524static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9525                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9526    bool skip_call = false;
9527    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9528        DAGNode &subpass_node = subpass_to_node[i];
9529        subpass_node.pass = i;
9530    }
9531    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9532        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9533        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9534            if (dependency.srcSubpass == dependency.dstSubpass) {
9535                skip_call |=
9536                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9537                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9538            }
9539        } else if (dependency.srcSubpass > dependency.dstSubpass) {
9540            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9541                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9543        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9544            has_self_dependency[dependency.srcSubpass] = true;
9545        } else {
9546            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9547            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9548        }
9549    }
9550    return skip_call;
9551}
9552
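// vkCreateShaderModule: unless shader validation is disabled, run the SPIRV-Tools validator
// over the incoming module and fail creation on errors. When VK_NV_glsl_shader is enabled
// and the module is not SPIR-V (wrong magic number), validator failures are not reported.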
9553VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9554                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
9555    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9556    bool skip_call = false;
9557    spv_result_t spv_valid = SPV_SUCCESS;
9558
9559    if (!GetDisables(dev_data)->shader_validation) {
9560        // Use SPIRV-Tools validator to try and catch any issues with the module itself
9561        spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9562        spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
9563        spv_diagnostic diag = nullptr;
9564
9565        spv_valid = spvValidate(ctx, &binary, &diag);
9566        if (spv_valid != SPV_SUCCESS) {
9567            if (!dev_data->device_extensions.nv_glsl_shader_enabled || (pCreateInfo->pCode[0] == spv::MagicNumber)) {
9568                skip_call |= log_msg(dev_data->report_data,
9569                    spv_valid == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9570                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
9571                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
9572            }
9573        }
9574
9575        spvDiagnosticDestroy(diag);
9576        spvContextDestroy(ctx);
9577
9578        if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
9579    }
9580
9581    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9582
9583    if (res == VK_SUCCESS && !GetDisables(dev_data)->shader_validation) {
9584        std::lock_guard<std::mutex> lock(global_lock);
9585        const auto new_shader_module = (SPV_SUCCESS == spv_valid ? new shader_module(pCreateInfo) : new shader_module());
9586        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new_shader_module);
9587    }
9588    return res;
9589}
9590
9591static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9592    bool skip_call = false;
9593    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9594        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9595                             VALIDATION_ERROR_00325, "DS",
9596                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
9597                             attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
9598    }
9599    return skip_call;
9600}
9601
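// VkSampleCountFlagBits values are single bits, so ORing together the sample counts of every
// color and depth/stencil attachment in a subpass yields a power of two exactly when all of
// them agree. IsPowerOfTwo is used below to detect mismatched sample counts within a subpass.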
9602static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
9603
9604static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9605    bool skip_call = false;
9606    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9607        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9608        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9609            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9610                                 VALIDATION_ERROR_00347, "DS",
9611                                 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s",
9612                                 i, validation_error_map[VALIDATION_ERROR_00347]);
9613        }
9614        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9615            uint32_t attachment = subpass.pPreserveAttachments[j];
9616            if (attachment == VK_ATTACHMENT_UNUSED) {
9617                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9618                                     __LINE__, VALIDATION_ERROR_00356, "DS",
9619                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
9620                                     validation_error_map[VALIDATION_ERROR_00356]);
9621            } else {
9622                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9623            }
9624        }
9625
9626        auto subpass_performs_resolve =
9627            subpass.pResolveAttachments &&
9628            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9629                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9630
9631        unsigned sample_count = 0;
9632
9633        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9634            uint32_t attachment;
9635            if (subpass.pResolveAttachments) {
9636                attachment = subpass.pResolveAttachments[j].attachment;
9637                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9638
9639                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
9640                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9641                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9642                                         __LINE__, VALIDATION_ERROR_00352, "DS",
9643                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
9644                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
9645                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
9646                                         validation_error_map[VALIDATION_ERROR_00352]);
9647                }
9648            }
9649            attachment = subpass.pColorAttachments[j].attachment;
9650            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9651
9652            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9653                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9654
9655                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9656                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9657                                         __LINE__, VALIDATION_ERROR_00351, "DS",
9658                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
9659                                         "which has VK_SAMPLE_COUNT_1_BIT. %s",
9660                                         i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
9661                }
9662            }
9663        }
9664
9665        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9666            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9667            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9668
9669            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9670                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9671            }
9672        }
9673
9674        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9675            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9676            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9677        }
9678
9679        if (sample_count && !IsPowerOfTwo(sample_count)) {
9680            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9681                                 VALIDATION_ERROR_00337, "DS",
9682                                 "CreateRenderPass:  Subpass %u attempts to render to "
9683                                 "attachments with inconsistent sample counts. %s",
9684                                 i, validation_error_map[VALIDATION_ERROR_00337]);
9685        }
9686    }
9687    return skip_call;
9688}
9689
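// vkCreateRenderPass: validate attachment usage, dependency stage masks, and attachment
// layouts up front. On success, build the subpass DAG and record for each attachment whether
// its first use is a read (input attachment) or a write (color/depth), plus that first layout.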
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        skip_call |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
                                                  VALIDATION_ERROR_00368, VALIDATION_ERROR_00370);
        skip_call |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
                                                  VALIDATION_ERROR_00369, VALIDATION_ERROR_00371);
    }
    if (!skip_call) {
        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

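// Descriptive note on the attachment_first_read bookkeeping above: only the *first* recorded
// use of an attachment counts. In a two-subpass pass where attachment 0 is written as a color
// attachment in subpass 0 and then consumed as an input attachment in subpass 1, the color use
// is seen first, so attachment_first_read[0] == false. An attachment is recorded with
// first_read == true only when its first use anywhere in the pass is as an input attachment,
// which later drives the "image memory must be valid" checks in vkCmdBeginRenderPass().
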
static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             error_code, "DS", "Cannot execute command %s on a secondary command buffer. %s", cmd_name.c_str(),
                             validation_error_map[error_code]);
    }
    return skip_call;
}

static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}

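// Illustrative sketch (not part of the layer): a renderArea that fails the bounds check above
// against a hypothetical 800x600 framebuffer, because offset.x + extent.width (700 + 200 = 900)
// exceeds the framebuffer width.
//
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//     rp_begin.renderArea.offset = {700, 0};
//     rp_begin.renderArea.extent = {200, 600};  // 700 + 200 > 800 -> DRAWSTATE_INVALID_RENDER_AREA
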
// Returns true iff the given op applies to this attachment: for a stencil-only format the stencil[Load|Store]Op is checked,
// for a color or depth-only format the [load|store]Op is checked, and for a combined depth/stencil format either op counts.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}

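// Worked example of the helper above (illustrative): for a combined depth/stencil format such
// as VK_FORMAT_D24_UNORM_S8_UINT with loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR and
// stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD, both
//
//     FormatSpecificLoadAndStoreOpSettings(fmt, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR)
//     FormatSpecificLoadAndStoreOpSettings(fmt, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)
//
// return true, since each op matches one checked aspect. For a stencil-only format such as
// VK_FORMAT_S8_UINT only the stencil op participates, so the first call would return false.
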
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                              VkSubpassContents contents) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    auto render_pass_state = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (cb_node) {
        if (render_pass_state) {
            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    clear_op_size = static_cast<uint32_t>(i) + 1;
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
                if (render_pass_state->attachment_first_read[i]) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
            }
            if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<uint64_t &>(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
                    "be at least %u entries in the pClearValues array, because the highest index attachment in renderPass "
                    "0x%" PRIx64
                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array "
                    "is indexed by attachment number, so any pClearValues entries between 0 and %u that correspond to "
                    "attachments that aren't cleared will simply be ignored. %s",
                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(render_pass_state->renderPass),
                    clear_op_size - 1, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
            }
            if (clear_op_size < pRenderPassBegin->clearValueCount) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<uint64_t &>(render_pass_state->renderPass), __LINE__,
                    DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only the first %u "
                    "entries in the pClearValues array are used. The highest index of any attachment in renderPass 0x%" PRIx64
                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.",
                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(render_pass_state->renderPass),
                    clear_op_size - 1);
            }
            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
                                                               getFramebufferState(dev_data, pRenderPassBegin->framebuffer));
            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
            skip_call |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass", VALIDATION_ERROR_00441);
            skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS);
            cb_node->activeRenderPass = render_pass_state;
            // This is a shallow copy as that is all that is needed for now
            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
            cb_node->activeSubpass = 0;
            cb_node->activeSubpassContents = contents;
            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer and its children to this cmdBuffer
            AddFramebufferBinding(dev_data, cb_node, framebuffer);
            // Transition attachments to the correct layouts for the first subpass
            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass, framebuffer);
        }
    }
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}

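// Illustrative sketch (not part of the layer): sizing pClearValues so the check above passes.
// If attachments 0 and 2 use VK_ATTACHMENT_LOAD_OP_CLEAR and attachment 1 does not,
// clearValueCount must still be at least 3 because the array is indexed by attachment number;
// entry 1 is present but ignored. Handles and dimensions are hypothetical.
//
//     VkClearValue clears[3] = {};
//     clears[0].color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     clears[2].depthStencil = {1.0f, 0};
//
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//     rp_begin.renderPass = render_pass;    // hypothetical handle
//     rp_begin.framebuffer = framebuffer;   // hypothetical handle
//     rp_begin.renderArea = {{0, 0}, {width, height}};
//     rp_begin.clearValueCount = 3;         // highest LOAD_OP_CLEAR attachment index is 2
//     rp_begin.pClearValues = clears;
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
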
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass", VALIDATION_ERROR_00459);
        skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS);
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);

        // outsideRenderPass() only records an error, so guard the dereference in case no render pass is active
        if (pCB->activeRenderPass) {
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                     __LINE__, VALIDATION_ERROR_00453, "DS",
                                     "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
                                     validation_error_map[VALIDATION_ERROR_00453]);
            }
        }
    }
    lock.unlock();

    if (skip_call) return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass,
                                 getFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = getCBNode(dev_data, commandBuffer);
    FRAMEBUFFER_STATE *framebuffer = NULL;
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00460, "DS",
                    "vkCmdEndRenderPass(): Called before reaching final subpass. %s", validation_error_map[VALIDATION_ERROR_00460]);
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00464);
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass", VALIDATION_ERROR_00465);
        skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS);
    }
    lock.unlock();

    if (skip_call) return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

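// Note on the store-op tracking above (descriptive; it mirrors the load-op handling in
// vkCmdBeginRenderPass): VK_ATTACHMENT_STORE_OP_STORE marks the attachment's backing memory
// as valid once the pass ends, while VK_ATTACHMENT_STORE_OP_DONT_CARE marks it invalid, so a
// later render pass that uses VK_ATTACHMENT_LOAD_OP_LOAD on the same image is correctly
// flagged for reading undefined contents.
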
static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   VALIDATION_ERROR_02059, "DS", "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
                                                 " which has a render pass "
                                                 "that is not compatible with the Primary Cmd Buffer's current render pass. "
                                                 "Attachment %u is not compatible with %u: %s. %s",
                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
                   validation_error_map[VALIDATION_ERROR_02059]);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                                 "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                                 "The second is unused while the first is not.");
        return skip_call;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip_call;
}

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip_call = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                     secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                     secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

// Verify that the given renderPass CreateInfos for primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; overloaded versions below take the renderPass handle and
//  feed into this function.
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip_call = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u.",
                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                      primaryPassCI->subpassCount > 1);
        }
    }
    return skip_call;
}

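// Descriptive note on render pass compatibility (per the checks above): two render passes are
// treated as compatible when they have the same subpassCount and, position-by-position, each
// referenced attachment pair matches in format and sample count (and in flags when the pass has
// multiple subpasses). Out-of-range attachment indices are normalized to VK_ATTACHMENT_UNUSED
// first, so an unused slot is only compatible with another unused slot.
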
static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                VALIDATION_ERROR_02060, "DS",
                "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64 " which has a framebuffer 0x%" PRIx64
                " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
                reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
                reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
        }
        auto fb = getFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                 "which has invalid framebuffer 0x%" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        // Guard against an unknown render pass handle before dereferencing the state pointer
        if (cb_renderpass && cb_renderpass->renderPass != fb->createInfo.renderPass) {
            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                         cb_renderpass->createInfo.ptr());
        }
    }
    return skip_call;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, VALIDATION_ERROR_02065, "DS",
                                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                         "which has invalid active query pool 0x%" PRIx64
                                         ". Pipeline statistics are being queried, so every pipelineStatistics "
                                         "bit set on the secondary command buffer must also be set on the query pool. %s",
                                         pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
                                         validation_error_map[VALIDATION_ERROR_02065]);
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                 "which has invalid active query pool 0x%" PRIx64
                                 " of type %d, but a query of that type has been started on "
                                 "secondary Cmd Buffer 0x%p.",
                                 pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
                                 queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
        }
    }

    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
    }

    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip_call = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            assert(pSubCB);
            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00153, "DS",
                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                                     "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
                                     pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]);
            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
                        pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
                        validation_error_map[VALIDATION_ERROR_02057]);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    // (guard against an unknown inherited render pass handle before dereferencing)
                    if (secondary_rp_state && (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass)) {
                        skip_call |=
                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
                                                            pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
                    }
                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
                if (secondary_rp_state && (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
                }
            }
            // TODO(mlentine): Move more logic into this method
            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__,
                                         VALIDATION_ERROR_00154, "DS",
                                         "Attempt to simultaneously execute command buffer 0x%p"
                                         " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
                                         pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        pCommandBuffers[i], pCB->commandBuffer);
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_02062, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%p) cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device. %s",
                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
            }
            // Propagate layout transitions to the primary cmd buffer
            for (auto ilm_entry : pSubCB->imageLayoutMap) {
                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
            for (auto &function : pSubCB->queryUpdates) {
                pCB->queryUpdates.push_back(function);
            }
        }
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands", VALIDATION_ERROR_00163);
        skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
    }
    lock.unlock();
    if (!skip_call) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}

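// Illustrative sketch (not part of the layer): recording a secondary command buffer so the
// checks above pass when it is executed inside a render pass. Handles are hypothetical.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;   // must be compatible with the pass begun on the primary
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer;  // optional; if not VK_NULL_HANDLE it must match the primary's
//
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;  // required inside a render pass
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);
//     // ... record draw commands ...
//     vkEndCommandBuffer(secondary_cb);
//
//     // On the primary, the render pass must have been begun with
//     // VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS before calling:
//     vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);
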
VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                                         void **ppData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        // TODO : This could be more fine-grained to track just the region that is valid
        mem_info->global_valid = true;
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip_call |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
        // TODO : Do we need to create new "bound_range" for the mapped range?
        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629,
                                 "MEM",
                                 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
                                 (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
        }
    }
    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
    lock.unlock();

    if (!skip_call) {
        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
            lock.lock();
            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
            storeMemRanges(dev_data, mem, offset, size);
            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
            lock.unlock();
        }
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= deleteMemRanges(dev_data, mem);
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.UnmapMemory(device, mem);
    }
}

static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    VALIDATION_ERROR_00643, "MEM", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                                                                   ") is less than Memory Object's offset "
                                                                   "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset),
                                    static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
                }
            } else {
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
                                "%s: Flush/Invalidate range end (" PRINTF_SIZE_T_SPECIFIER ") or offset (" PRINTF_SIZE_T_SPECIFIER
                                ") falls outside the Memory Object's mapped range, whose upper-bound is "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
                                validation_error_map[VALIDATION_ERROR_00642]);
                }
            }
        }
    }
    return skip;
}

static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                     const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info) {
            if (mem_info->shadow_copy) {
                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                        ? mem_info->mem_range.size
                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
                char *data = static_cast<char *>(mem_info->shadow_copy);
                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
                                        MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
                                        (uint64_t)mem_ranges[i].memory);
                    }
                }
                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
                                        MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
                                        (uint64_t)mem_ranges[i].memory);
                    }
                }
                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
            }
        }
    }
    return skip;
}

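// Descriptive note on the shadow-copy layout checked above: for non-coherent mappings the layer
// hands the application a padded shadow allocation rather than the driver pointer. Bytes
// [0, shadow_pad_size) and [shadow_pad_size + size, 2 * shadow_pad_size + size) are filled with
// NoncoherentMemoryFillValue, so any write that strays outside the mapped range shows up as a
// corrupted guard byte at flush time:
//
//     | pad (fill value) | application data (size bytes) | pad (fill value) |
//     ^ shadow_copy      ^ shadow_copy + shadow_pad_size
//
// Only the middle region is memcpy'd to mem_info->p_driver_data once the guards are checked.
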
static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info && mem_info->shadow_copy) {
            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                    ? mem_info->mem_range.size
                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
            char *data = static_cast<char *>(mem_info->shadow_copy);
            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
        }
    }
}

static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                  const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
        if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00644, "MEM",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
        }
        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00645, "MEM",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
        }
    }
    return skip;
}

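// Illustrative sketch (not part of the layer): aligning a flush range to
// VkPhysicalDeviceLimits::nonCoherentAtomSize so the checks above pass. Names like
// dirty_offset/dirty_size are hypothetical; atom_size comes from vkGetPhysicalDeviceProperties().
//
//     VkDeviceSize atom_size = props.limits.nonCoherentAtomSize;  // e.g. 64 or 256
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = memory;
//     range.offset = (dirty_offset / atom_size) * atom_size;                        // round start down
//     VkDeviceSize end = dirty_offset + dirty_size;
//     range.size = ((end - range.offset + atom_size - 1) / atom_size) * atom_size;  // round end up, or VK_WHOLE_SIZE
//     vkFlushMappedMemoryRanges(device, 1, &range);
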
static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                   const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
    // Assumed wiring: apply the nonCoherentAtomSize checks defined above to these ranges
    skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                       const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                        const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    // Assumed wiring: apply the nonCoherentAtomSize checks defined above to these ranges
    skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    std::lock_guard<std::mutex> lock(global_lock);
    // Update our shadow copy with modified driver data
    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}

VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
        if (result == VK_SUCCESS) {
            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
        }
    }
    return result;
}

10522VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10523    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10524    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10525    bool skip_call = false;
10526    std::unique_lock<std::mutex> lock(global_lock);
10527    auto image_state = getImageState(dev_data, image);
10528    if (image_state) {
10529        // Track objects tied to memory
10530        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10531        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10532        if (!image_state->memory_requirements_checked) {
10533            // The spec does not explicitly require calling vkGetImageMemoryRequirements() before vkBindImageMemory(), but it
10534            //  is implied: the memory being bound must conform to the VkMemoryRequirements returned by
10535            //  vkGetImageMemoryRequirements()
10536            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10537                                 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
10538                                 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
10539                                 " but vkGetImageMemoryRequirements() has not been called on that image.",
10540                                 image_handle);
10541            // Make the call for them so we can verify the state
10542            lock.unlock();
10543            dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements);
10544            lock.lock();
10545        }
10546
10547        // Track and validate bound memory range information
10548        auto mem_info = getMemObjInfo(dev_data, mem);
10549        if (mem_info) {
10550            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
10551                                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10552            skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
10553                                             VALIDATION_ERROR_00806);
10554        }
10555
10556        lock.unlock();
10557        if (!skip_call) {
10558            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
10559            lock.lock();
10560            image_state->binding.mem = mem;
10561            image_state->binding.offset = memoryOffset;
10562            image_state->binding.size = image_state->requirements.size;
10563            lock.unlock();
10564        }
10565    } else {
10566        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10567                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10568                "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been destroyed?",
10569                reinterpret_cast<const uint64_t &>(image));
10570    }
10571    return result;
10572}
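// Editor's note: the query-then-bind order the warning above steers apps toward, as an
// illustrative sketch; FindMemoryType is a hypothetical helper that picks an index out
// of reqs.memoryTypeBits with the desired property flags:
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkMemoryAllocateInfo alloc = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
//     alloc.allocationSize = reqs.size;
//     alloc.memoryTypeIndex = FindMemoryType(reqs.memoryTypeBits, desired_props);
//     VkDeviceMemory mem = VK_NULL_HANDLE;
//     vkAllocateMemory(device, &alloc, nullptr, &mem);
//     vkBindImageMemory(device, image, mem, 0);  // any nonzero offset must honor reqs.alignment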
10573
10574VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10575    bool skip_call = false;
10576    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10577    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10578    std::unique_lock<std::mutex> lock(global_lock);
10579    auto event_state = getEventNode(dev_data, event);
10580    if (event_state) {
10581        event_state->needsSignaled = false;
10582        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10583        if (event_state->write_in_use) {
10584            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10585                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10586                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
10587                                 reinterpret_cast<const uint64_t &>(event));
10588        }
10589    }
10590    lock.unlock();
10591    // A host signal of an event is immediately visible to all queues, so update stageMask for any queue that has seen this event
10592    // TODO : For correctness this needs a separate fix to verify that the app doesn't make incorrect assumptions about the
10593    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10594    for (auto queue_data : dev_data->queueMap) {
10595        auto event_entry = queue_data.second.eventToStageMap.find(event);
10596        if (event_entry != queue_data.second.eventToStageMap.end()) {
10597            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10598        }
10599    }
10600    if (!skip_call) result = dev_data->dispatch_table.SetEvent(device, event);
10601    return result;
10602}
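// Editor's note: a sketch of the host-signal pattern this entry point validates
// (illustrative only). Waits recorded against a host signal must use
// VK_PIPELINE_STAGE_HOST_BIT as the source stage:
//
//     vkCmdWaitEvents(cmd, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                     0, nullptr, 0, nullptr, 0, nullptr);
//     /* ...record the dependent work, end the command buffer, submit it... */
//     vkSetEvent(device, event);  // host signal; releases the recorded wait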
10603
10604VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10605                                               VkFence fence) {
10606    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10607    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10608    bool skip_call = false;
10609    std::unique_lock<std::mutex> lock(global_lock);
10610    auto pFence = getFenceNode(dev_data, fence);
10611    auto pQueue = getQueueState(dev_data, queue);
10612
10613    // First verify that fence is not in use
10614    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
10615
10616    if (pFence) {
10617        SubmitFence(pQueue, pFence, bindInfoCount);
10618    }
10619
10620    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10621        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10622        // Track objects tied to memory
10623        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10624            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10625                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
10626                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10627                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10628                                        "vkQueueBindSparse"))
10629                    skip_call = true;
10630            }
10631        }
10632        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10633            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10634                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
10635                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10636                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10637                                        "vkQueueBindSparse"))
10638                    skip_call = true;
10639            }
10640        }
10641        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10642            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10643                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
10644                // TODO: This size is broken for non-opaque bindings; it needs updating to comprehend full sparse binding data
10645                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;  // assumes 4 bytes per texel
10646                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
10647                                        (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10648                                        "vkQueueBindSparse"))
10649                    skip_call = true;
10650            }
10651        }
10652
10653        std::vector<SEMAPHORE_WAIT> semaphore_waits;
10654        std::vector<VkSemaphore> semaphore_signals;
10655        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10656            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10657            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10658            if (pSemaphore) {
10659                if (pSemaphore->signaled) {
10660                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
10661                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
10662                        pSemaphore->in_use.fetch_add(1);
10663                    }
10664                    pSemaphore->signaler.first = VK_NULL_HANDLE;
10665                    pSemaphore->signaled = false;
10666                } else {
10667                    skip_call |= log_msg(
10668                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10669                        reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10670                        "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10671                        queue, reinterpret_cast<const uint64_t &>(semaphore));
10672                }
10673            }
10674        }
10675        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10676            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10677            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10678            if (pSemaphore) {
10679                if (pSemaphore->signaled) {
10680                    skip_call |=
10681                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10682                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10683                                "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
10684                                ", but that semaphore is already signaled.",
10685                                queue, reinterpret_cast<const uint64_t &>(semaphore));
10686                } else {
10687                    pSemaphore->signaler.first = queue;
10688                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
10689                    pSemaphore->signaled = true;
10690                    pSemaphore->in_use.fetch_add(1);
10691                    semaphore_signals.push_back(semaphore);
10692                }
10693            }
10694        }
10695
10696        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
10697                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
10698    }
10699
10700    if (pFence && !bindInfoCount) {
10701        // No work to do, just dropping a fence in the queue by itself.
10702        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
10703                                         fence);
10704    }
10705
10706    lock.unlock();
10707
10708    if (!skip_call) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10709
10710    return result;
10711}
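// Editor's note: a minimal opaque image bind of the shape handled above, app-side and
// illustrative only ('reqs' comes from vkGetImageMemoryRequirements on the sparse image):
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = reqs.size;
//     bind.memory = mem;
//     bind.memoryOffset = 0;
//     VkSparseImageOpaqueMemoryBindInfo opaque_bind = {image, 1, &bind};
//     VkBindSparseInfo info = {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO};
//     info.imageOpaqueBindCount = 1;
//     info.pImageOpaqueBinds = &opaque_bind;
//     vkQueueBindSparse(queue, 1, &info, fence);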
10712
10713VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10714                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10715    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10716    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10717    if (result == VK_SUCCESS) {
10718        std::lock_guard<std::mutex> lock(global_lock);
10719        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
10720        sNode->signaler.first = VK_NULL_HANDLE;
10721        sNode->signaler.second = 0;
10722        sNode->signaled = false;
10723    }
10724    return result;
10725}
10726
10727VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
10728                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10729    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10730    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10731    if (result == VK_SUCCESS) {
10732        std::lock_guard<std::mutex> lock(global_lock);
10733        dev_data->eventMap[*pEvent].needsSignaled = false;
10734        dev_data->eventMap[*pEvent].write_in_use = 0;
10735        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10736    }
10737    return result;
10738}
10739
10740static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
10741                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
10742                                              SWAPCHAIN_NODE *old_swapchain_state) {
10743    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
10744
10745    // TODO: Revisit this; some of these rules are being relaxed.
10746    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
10747        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10748                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
10749                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
10750            return true;
10751    }
10752    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
10753        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10754                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
10755                    "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
10756            return true;
10757    }
10758    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
10759    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
10760        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10761                    reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
10762                    "%s: surface capabilities not retrieved for this physical device", func_name))
10763            return true;
10764    } else {  // have valid capabilities
10765        auto &capabilities = physical_device_state->surfaceCapabilities;
10766        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
10767        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
10768            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10769                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
10770                        "%s called with minImageCount = %d, which is outside the bounds returned "
10771                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
10772                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
10773                        validation_error_map[VALIDATION_ERROR_02331]))
10774                return true;
10775        }
10776
10777        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
10778            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10779                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
10780                        "%s called with minImageCount = %d, which is outside the bounds returned "
10781                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
10782                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
10783                        validation_error_map[VALIDATION_ERROR_02332]))
10784                return true;
10785        }
10786
10787        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
10788        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
10789            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
10790             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
10791             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
10792             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
10793            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10794                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
10795                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
10796                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
10797                        "maxImageExtent = (%d,%d). %s",
10798                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
10799                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
10800                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
10801                        validation_error_map[VALIDATION_ERROR_02334]))
10802                return true;
10803        }
10804        if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
10805            ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
10806             (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
10807            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10808                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
10809                        "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
10810                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
10811                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
10812                        capabilities.currentExtent.width, capabilities.currentExtent.height,
10813                        validation_error_map[VALIDATION_ERROR_02334]))
10814                return true;
10815        }
10816        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
10817        // VkSurfaceCapabilitiesKHR::supportedTransforms.
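        // Editor's note: "exactly one bit set" below is the usual power-of-two idiom: for nonzero x,
        // (x & (x - 1)) clears the lowest set bit, so it is zero iff x had a single bit set.
        // For example, 0b0100 & 0b0011 == 0, while 0b0110 & 0b0101 == 0b0100 != 0.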
10818        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
10819            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
10820            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
10821            // it up a little at a time, and then log it:
10822            std::string errorString = "";
10823            char str[1024];
10824            // Here's the first part of the message:
10825            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
10826                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
10827            errorString += str;
10828            for (int i = 0; i < 32; i++) {
10829                // Build up the rest of the message:
10830                if ((1u << i) & capabilities.supportedTransforms) {  // unsigned shift avoids overflow at bit 31
10831                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
10832                    sprintf(str, "    %s\n", newStr);
10833                    errorString += str;
10834                }
10835            }
10836            // Log the message that we've built up:
10837            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10838                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
10839                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
10840                return true;
10841        }
10842
10843        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
10844        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
10845        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
10846            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
10847            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
10848            // it up a little at a time, and then log it:
10849            std::string errorString = "";
10850            char str[1024];
10851            // Here's the first part of the message:
10852            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
10853                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
10854            errorString += str;
10855            for (int i = 0; i < 32; i++) {
10856                // Build up the rest of the message:
10857                if ((1u << i) & capabilities.supportedCompositeAlpha) {  // unsigned shift avoids overflow at bit 31
10858                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
10859                    sprintf(str, "    %s\n", newStr);
10860                    errorString += str;
10861                }
10862            }
10863            // Log the message that we've built up:
10864            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10865                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
10866                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
10867                return true;
10868        }
10869        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
10870        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
10871            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10872                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
10873                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
10874                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
10875                        validation_error_map[VALIDATION_ERROR_02335]))
10876                return true;
10877        }
10878        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
10879        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
10880            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10881                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
10882                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
10883                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
10884                        validation_error_map[VALIDATION_ERROR_02336]))
10885                return true;
10886        }
10887    }
10888
10889    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
10890    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
10891        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10892                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
10893                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
10894            return true;
10895    } else {
10896        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
10897        bool foundFormat = false;
10898        bool foundColorSpace = false;
10899        bool foundMatch = false;
10900        for (auto const &format : physical_device_state->surface_formats) {
10901            if (pCreateInfo->imageFormat == format.format) {
10902                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
10903                foundFormat = true;
10904                if (pCreateInfo->imageColorSpace == format.colorSpace) {
10905                    foundMatch = true;
10906                    break;
10907                }
10908            } else {
10909                if (pCreateInfo->imageColorSpace == format.colorSpace) {
10910                    foundColorSpace = true;
10911                }
10912            }
10913        }
10914        if (!foundMatch) {
10915            if (!foundFormat) {
10916                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10917                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
10918                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
10919                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
10920                    return true;
10921            }
10922            if (!foundColorSpace) {
10923                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10924                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
10925                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
10926                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
10927                    return true;
10928            }
10929        }
10930    }
10931
10932    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
10933    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
10934        // FIFO is required to always be supported
10935        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
10936            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10937                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
10938                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
10939                return true;
10940        }
10941    } else {
10942        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
10943        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
10944                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
10945        if (!foundMatch) {
10946            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10947                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
10948                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
10949                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
10950                return true;
10951        }
10952    }
10953
10954    return false;
10955}
10956
10957static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
10958                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
10959                                             SWAPCHAIN_NODE *old_swapchain_state) {
10960    if (VK_SUCCESS == result) {
10961        std::lock_guard<std::mutex> lock(global_lock);
10962        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
10963        surface_state->swapchain = swapchain_state.get();
10964        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
10965    } else {
10966        surface_state->swapchain = nullptr;
10967    }
10968    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
10969    if (old_swapchain_state) {
10970        old_swapchain_state->replaced = true;
10971    }
10972    surface_state->old_swapchain = old_swapchain_state;
10973    return;
10974}
10975
10976VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10977                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
10978    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10979    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
10980    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
10981
10982    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
10983        return VK_ERROR_VALIDATION_FAILED_EXT;
10984    }
10985
10986    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10987
10988    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
10989
10990    return result;
10991}
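// Editor's note: an app-side sketch (illustrative names) deriving the fields validated by
// PreCallValidateCreateSwapchainKHR() from the queried surface capabilities:
//
//     VkSurfaceCapabilitiesKHR caps;
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//     uint32_t image_count = caps.minImageCount + 1;
//     if (caps.maxImageCount > 0 && image_count > caps.maxImageCount)
//         image_count = caps.maxImageCount;  // maxImageCount == 0 means "no upper limit"
//     VkSwapchainCreateInfoKHR ci = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR};
//     ci.surface = surface;
//     ci.minImageCount = image_count;
//     ci.imageExtent = (caps.currentExtent.width == 0xFFFFFFFFu) ? desired_extent : caps.currentExtent;
//     ci.preTransform = caps.currentTransform;  // always a supported transform
//     ci.imageArrayLayers = 1;
//     // format/colorSpace, imageUsage, compositeAlpha, and presentMode come from the
//     // corresponding surface queries, as checked above.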
10992
10993VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10994    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10995    bool skip_call = false;
10996
10997    std::unique_lock<std::mutex> lock(global_lock);
10998    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
10999    if (swapchain_data) {
11000        if (swapchain_data->images.size() > 0) {
11001            for (auto swapchain_image : swapchain_data->images) {
11002                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11003                if (image_sub != dev_data->imageSubresourceMap.end()) {
11004                    for (auto imgsubpair : image_sub->second) {
11005                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11006                        if (image_item != dev_data->imageLayoutMap.end()) {
11007                            dev_data->imageLayoutMap.erase(image_item);
11008                        }
11009                    }
11010                    dev_data->imageSubresourceMap.erase(image_sub);
11011                }
11012                skip_call |=
11013                    ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11014                dev_data->imageMap.erase(swapchain_image);
11015            }
11016        }
11017
11018        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11019        if (surface_state) {
11020            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
11021            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
11022        }
11023
11024        dev_data->device_extensions.swapchainMap.erase(swapchain);
11025    }
11026    lock.unlock();
11027    if (!skip_call) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11028}
11029
11030VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount,
11031                                                     VkImage *pSwapchainImages) {
11032    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11033    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11034
11035    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11036        // This should never happen and is checked by param checker.
11037        if (!pCount) return result;
11038        std::lock_guard<std::mutex> lock(global_lock);
11039        const size_t count = *pCount;
11040        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11041        if (swapchain_node && !swapchain_node->images.empty()) {
11042            // TODO : Not sure I like the memcmp here, but it works
11043            const bool mismatch = (swapchain_node->images.size() != count ||
11044                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11045            if (mismatch) {
11046                // TODO: Verify against Valid Usage section of extension
11047                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11048                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11049                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching data",
11051                        (uint64_t)(swapchain));
11052            }
11053        }
11054        for (uint32_t i = 0; i < *pCount; ++i) {
11055            IMAGE_LAYOUT_NODE image_layout_node;
11056            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11057            image_layout_node.format = swapchain_node->createInfo.imageFormat;
11058            // Add imageMap entries for each swapchain image
11059            VkImageCreateInfo image_ci = {};
                 image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;  // editor's fix: sType was left zeroed
                 image_ci.imageType = VK_IMAGE_TYPE_2D;                 // editor's fix: swapchain images are always 2D
11060            image_ci.mipLevels = 1;
11061            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11062            image_ci.usage = swapchain_node->createInfo.imageUsage;
11063            image_ci.format = swapchain_node->createInfo.imageFormat;
11064            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11065            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11066            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
                 image_ci.extent.depth = 1;                             // editor's fix: a depth of 0 is not a valid extent
11067            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11068            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
11069            auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
11070            image_state->valid = false;
11071            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11072            swapchain_node->images.push_back(pSwapchainImages[i]);
11073            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11074            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11075            dev_data->imageLayoutMap[subpair] = image_layout_node;
11076            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11077        }
11078    }
11079    return result;
11080}
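// Editor's note: the standard two-call idiom for this query, app-side and illustrative only:
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);         // first call: get count
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());   // second call: get handles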
11081
11082VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11083    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11084    bool skip_call = false;
11085
11086    std::lock_guard<std::mutex> lock(global_lock);
11087    auto queue_state = getQueueState(dev_data, queue);
11088
11089    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11090        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11091        if (pSemaphore && !pSemaphore->signaled) {
11092            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11093                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
11094                                 "DS", "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
11095                                 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11096        }
11097    }
11098
11099    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11100        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11101        if (swapchain_data) {
11102            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11103                skip_call |= log_msg(
11104                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11105                    reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11106                    "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11107                    pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11108            } else {
11109                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11110                auto image_state = getImageState(dev_data, image);
11111                skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
11112
11113                if (!image_state->acquired) {
11114                    skip_call |= log_msg(
11115                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11116                        reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
11117                        DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
11118                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
11119                }
11120
11121                vector<VkImageLayout> layouts;
11122                if (FindLayouts(dev_data, image, layouts)) {
11123                    for (auto layout : layouts) {
11124                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11125                            skip_call |=
11126                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11127                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
11128                                        "Images passed to present must be in layout "
11129                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, but this image is in %s. %s",
11130                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
11131                        }
11132                    }
11133                }
11134            }
11135
11136            // All physical devices and queue families are required to be able
11137            // to present to any native window on Android; require the
11138            // application to have established support on any other platform.
11139            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
11140                auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11141                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
11142
11143                if (support_it == surface_state->gpu_queue_support.end()) {
11144                    skip_call |=
11145                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11146                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
11147                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
11148                                "vkQueuePresentKHR: Presenting image without calling "
11149                                "vkGetPhysicalDeviceSurfaceSupportKHR");
11150                } else if (!support_it->second) {
11151                    skip_call |= log_msg(
11152                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11153                        reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961, "DS",
11154                        "vkQueuePresentKHR: Presenting image on queue that cannot "
11155                        "present to this surface. %s",
11156                        validation_error_map[VALIDATION_ERROR_01961]);
11157                }
11158            }
11159        }
11160    }
11161
11162    if (skip_call) {
11163        return VK_ERROR_VALIDATION_FAILED_EXT;
11164    }
11165
11166    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11167
11168    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11169        // Semaphore waits occur before error generation, if the call reached
11170        // the ICD. (Confirm?)
11171        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11172            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11173            if (pSemaphore) {
11174                pSemaphore->signaler.first = VK_NULL_HANDLE;
11175                pSemaphore->signaled = false;
11176            }
11177        }
11178
11179        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11180            // Note: this is imperfect, in that we can get confused about what
11181            // did or didn't succeed-- but if the app does that, it's confused
11182            // itself just as much.
11183            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11184
11185            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
11186
11187            // Mark the image as having been released to the WSI
11188            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11189            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11190            auto image_state = getImageState(dev_data, image);
11191            image_state->acquired = false;
11192        }
11193
11194        // Note: even though presentation is directed to a queue, there is no
11195        // direct ordering between QP and subsequent work, so QP (and its
11196        // semaphore waits) /never/ participate in any completion proof.
11197    }
11198
11199    return result;
11200}
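// Editor's note: a present call shaped the way the checks above expect, illustrative only:
// the image was acquired, transitioned to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and the wait
// semaphore was signaled by the submit that rendered to it:
//
//     VkPresentInfoKHR present = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_done_semaphore;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &image_index;  // from vkAcquireNextImageKHR
//     vkQueuePresentKHR(queue, &present);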
11201
11202static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
11203                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11204                                                     std::vector<SURFACE_STATE *> &surface_state,
11205                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11206    if (pCreateInfos) {
11207        std::lock_guard<std::mutex> lock(global_lock);
11208        for (uint32_t i = 0; i < swapchainCount; i++) {
11209            surface_state.push_back(getSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
11210            old_swapchain_state.push_back(getSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
11211            std::stringstream func_name;
11212            func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
11213            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
11214                                                  old_swapchain_state[i])) {
11215                return true;
11216            }
11217        }
11218    }
11219    return false;
11220}
11221
11222static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
11223                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11224                                                    std::vector<SURFACE_STATE *> &surface_state,
11225                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11226    if (VK_SUCCESS == result) {
11227        for (uint32_t i = 0; i < swapchainCount; i++) {
11228            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
11229            surface_state[i]->swapchain = swapchain_state.get();
11230            dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
11231        }
11232    } else {
11233        for (uint32_t i = 0; i < swapchainCount; i++) {
11234            surface_state[i]->swapchain = nullptr;
11235        }
11236    }
11237    // Spec requires that even if CreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
11238    for (uint32_t i = 0; i < swapchainCount; i++) {
11239        if (old_swapchain_state[i]) {
11240            old_swapchain_state[i]->replaced = true;
11241        }
11242        surface_state[i]->old_swapchain = old_swapchain_state[i];
11243    }
11244    return;
11245}
11246
11247VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11248                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
11249                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11250    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11251    std::vector<SURFACE_STATE *> surface_state;
11252    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
11253
11254    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11255                                                 old_swapchain_state)) {
11256        return VK_ERROR_VALIDATION_FAILED_EXT;
11257    }
11258
11259    VkResult result =
11260        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11261
11262    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11263                                            old_swapchain_state);
11264
11265    return result;
11266}
11267
11268VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11269                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11270    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11271    bool skip_call = false;
11272
11273    std::unique_lock<std::mutex> lock(global_lock);
11274
11275    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11276        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11277                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11278                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11279                             "to determine the completion of this operation.");
11280    }
11281
11282    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11283    if (pSemaphore && pSemaphore->signaled) {
11284        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11285                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
11286                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
11287                             validation_error_map[VALIDATION_ERROR_01952]);
11288    }
11289
11290    auto pFence = getFenceNode(dev_data, fence);
11291    if (pFence) {
11292        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11293    }
11294
11295    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11296
11297    if (swapchain_data && swapchain_data->replaced) {
11298        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11299                             reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
11300                             "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
11301                             "present any images it has acquired, but cannot acquire any more.");
11302    }
11303
11304    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11305    if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
11306        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
11307                                                 [=](VkImage image) { return getImageState(dev_data, image)->acquired; });
11308        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
11309            skip_call |=
11310                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11311                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
11312                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
11313                        acquired_images);
11314        }
11315    }
11316
11317    if (swapchain_data && swapchain_data->images.size() == 0) {
11318        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11319                             reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
11320                             "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
11321                             "vkGetSwapchainImagesKHR after swapchain creation.");
11322    }
11323
11324    lock.unlock();
11325
11326    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
11327
11328    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11329
11330    lock.lock();
11331    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11332        if (pFence) {
11333            pFence->state = FENCE_INFLIGHT;
11334            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
11335        }
11336
11337        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11338        if (pSemaphore) {
11339            pSemaphore->signaled = true;
11340            pSemaphore->signaler.first = VK_NULL_HANDLE;
11341        }
11342
11343        // Mark the image as acquired (guard against unknown swapchains and unqueried image lists).
11344        auto image = (swapchain_data && *pImageIndex < swapchain_data->images.size())
                             ? swapchain_data->images[*pImageIndex] : VK_NULL_HANDLE;
11345        auto image_state = getImageState(dev_data, image);
11346        if (image_state) image_state->acquired = true;
11347    }
11348    lock.unlock();
11349
11350    return result;
11351}
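// Editor's note: an acquire that satisfies the checks above, illustrative only: at least
// one of semaphore/fence is provided, and the app holds at most
// (imageCount - minImageCount) images at a time:
//
//     uint32_t image_index = 0;
//     VkResult res = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore,
//                                          VK_NULL_HANDLE, &image_index);
//     if (res == VK_ERROR_OUT_OF_DATE_KHR) { /* swapchain replaced; recreate it */ }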
11352
11353VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11354                                                        VkPhysicalDevice *pPhysicalDevices) {
11355    bool skip_call = false;
11356    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11357    assert(instance_data);
11358
11359    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11360    if (NULL == pPhysicalDevices) {
11361        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11362    } else {
11363        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11364            // Flag warning here. You can call this without having queried the count, but it may not be
11365            // robust on platforms with multiple physical devices.
11366            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11367                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11368                                 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11369                                 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11370        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11371        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11372            // Having actual count match count from app is not a requirement, so this can be a warning
11373            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11374                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11375                                 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11376                                 "supported by this instance is %u.",
11377                                 *pPhysicalDeviceCount, instance_data->physical_devices_count);
11378        }
11379        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11380    }
11381    if (skip_call) {
11382        return VK_ERROR_VALIDATION_FAILED_EXT;
11383    }
11384    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11385    if (NULL == pPhysicalDevices) {
11386        instance_data->physical_devices_count = *pPhysicalDeviceCount;
11387    } else if (result == VK_SUCCESS) {  // Save physical devices
11388        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11389            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11390            phys_device_state.phys_device = pPhysicalDevices[i];
11391            // Init actual features for each physical device
11392            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11393        }
11394    }
11395    return result;
11396}
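
// Illustrative sketch of the two-call idiom validated above (names are hypothetical):
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, nullptr);           // state -> QUERY_COUNT
//     std::vector<VkPhysicalDevice> devices(count);
//     vkEnumeratePhysicalDevices(instance, &count, devices.data());    // state -> QUERY_DETAILS
//
// Skipping the first call, or passing a count that differs from the one returned, triggers the
// DEVLIMITS_MISSING_QUERY_COUNT / DEVLIMITS_COUNT_MISMATCH warnings above.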
11397
11398// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11399static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11400                                                                 PHYSICAL_DEVICE_STATE *pd_state,
11401                                                                 uint32_t *pQueueFamilyPropertyCount, bool qfp_null,
11402                                                                 const char *count_var_name, const char *caller_name) {
11403    bool skip = false;
11404    if (qfp_null) {
11405        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11406    } else {
11407        // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to get
11408        // Verify that for each physical device, this function is called first with a NULL pQueueFamilyProperties ptr
11409        // in order to get the count
11410            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11411                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11412                            "Call sequence has %s() w/ non-NULL "
11413                            "pQueueFamilyProperties. You should first call %s() w/ "
11414                            "NULL pQueueFamilyProperties to query pCount.",
11415                            caller_name, caller_name);
11416        }
11417        // Then verify that pCount that is passed in on second call matches what was returned
11418        // Then verify that the count passed in on this call matches what was returned on the initial count query
11419            // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11420            // provide as warning
11421            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11422                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11423                            "Call to %s() w/ %s value %u, but actual count supported by this physicalDevice is %u.", caller_name,
11424                            count_var_name, *pQueueFamilyPropertyCount, pd_state->queueFamilyPropertiesCount);
11425        }
11426        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11427    }
11428    return skip;
11429}
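
// Illustrative sketch of the call sequence the helper above expects (hypothetical names):
//
//     uint32_t qf_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, nullptr);        // QUERY_COUNT
//     std::vector<VkQueueFamilyProperties> props(qf_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, props.data());   // QUERY_DETAILS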
11430
11431static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11432                                                                  PHYSICAL_DEVICE_STATE *pd_state, uint32_t *pCount,
11433                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
11434    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, pCount,
11435                                                                (nullptr == pQueueFamilyProperties), "pCount",
11436                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
11437}
11438
11439static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
11440                                                                      PHYSICAL_DEVICE_STATE *pd_state,
11441                                                                      uint32_t *pQueueFamilyPropertyCount,
11442                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11443    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, pQueueFamilyPropertyCount,
11444                                                                (nullptr == pQueueFamilyProperties), "pQueueFamilyPropertyCount",
11445                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
11446}
11447
11448// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11449static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11450                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11451    if (!pQueueFamilyProperties) {
11452        pd_state->queueFamilyPropertiesCount = count;
11453    } else {  // Save queue family properties
11454        if (pd_state->queue_family_properties.size() < count) pd_state->queue_family_properties.resize(count);
11455        for (uint32_t i = 0; i < count; i++) {
11456            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
11457        }
11458    }
11459}
11460
11461static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11462                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
11463    VkQueueFamilyProperties2KHR *pqfp = nullptr;
11464    std::vector<VkQueueFamilyProperties2KHR> qfp;
11465    qfp.resize(count);
11466    if (pQueueFamilyProperties) {
11467        for (uint32_t i = 0; i < count; ++i) {
11468            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
11469            qfp[i].pNext = nullptr;
11470            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
11471        }
11472        pqfp = qfp.data();
11473    }
11474    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
11475}
11476
11477static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11478                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11479    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
11480}
11481
11482VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11483                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
11484    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11485    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11486    assert(physical_device_state);
11487    bool skip =
11488        PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, pCount, pQueueFamilyProperties);
11489    if (skip) {
11490        return;
11491    }
11492    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
11493    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pCount, pQueueFamilyProperties);
11494}
11495
11496VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11497                                                                      uint32_t *pQueueFamilyPropertyCount,
11498                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11499    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11500    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11501    assert(physical_device_state);
11502    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
11503                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
11504    if (skip) {
11505        return;
11506    }
11507    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11508                                                                             pQueueFamilyProperties);
11509    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
11510                                                             pQueueFamilyProperties);
11511}
11512
11513template <typename TCreateInfo, typename FPtr>
11514static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11515                              VkSurfaceKHR *pSurface, FPtr fptr) {
11516    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11517
11518    // Call down the call chain:
11519    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11520
11521    if (result == VK_SUCCESS) {
11522        std::unique_lock<std::mutex> lock(global_lock);
11523        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11524        lock.unlock();
11525    }
11526
11527    return result;
11528}
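
// Illustrative note: CreateSurface() is parameterized on a pointer-to-member-function into the dispatch
// table so that every platform entry point below can share the same bookkeeping. The
// (instance_data->dispatch_table.*fptr)(...) syntax above invokes the selected member on that table.
// A hypothetical instantiation:
//
//     // return CreateSurface(instance, pCreateInfo, pAllocator, pSurface,
//     //                      &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);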
11529
11530VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11531    bool skip_call = false;
11532    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11533    std::unique_lock<std::mutex> lock(global_lock);
11534    auto surface_state = getSurfaceState(instance_data, surface);
11535
11536    if (surface_state) {
11537        // TODO: track swapchains created from this surface.
11538        instance_data->surface_map.erase(surface);
11539    }
11540    lock.unlock();
11541
11542    if (!skip_call) {
11543        // Call down the call chain:
11544        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11545    }
11546}
11547
11548VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11549                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11550    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
11551}
11552
11553#ifdef VK_USE_PLATFORM_ANDROID_KHR
11554VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11555                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11556    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11557}
11558#endif  // VK_USE_PLATFORM_ANDROID_KHR
11559
11560#ifdef VK_USE_PLATFORM_MIR_KHR
11561VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11562                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11563    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11564}
11565#endif  // VK_USE_PLATFORM_MIR_KHR
11566
11567#ifdef VK_USE_PLATFORM_WAYLAND_KHR
11568VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11569                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11570    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11571}
11572#endif  // VK_USE_PLATFORM_WAYLAND_KHR
11573
11574#ifdef VK_USE_PLATFORM_WIN32_KHR
11575VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11576                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11577    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11578}
11579#endif  // VK_USE_PLATFORM_WIN32_KHR
11580
11581#ifdef VK_USE_PLATFORM_XCB_KHR
11582VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11583                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11584    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11585}
11586#endif  // VK_USE_PLATFORM_XCB_KHR
11587
11588#ifdef VK_USE_PLATFORM_XLIB_KHR
11589VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11590                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11591    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11592}
11593#endif  // VK_USE_PLATFORM_XLIB_KHR
11594
11595VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11596                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11597    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11598
11599    std::unique_lock<std::mutex> lock(global_lock);
11600    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11601    lock.unlock();
11602
11603    auto result =
11604        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11605
11606    if (result == VK_SUCCESS) {
11607        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11608        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
11609    }
11610
11611    return result;
11612}
11613
11614VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11615                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
11616    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11617    std::unique_lock<std::mutex> lock(global_lock);
11618    auto surface_state = getSurfaceState(instance_data, surface);
11619    lock.unlock();
11620
11621    auto result =
11622        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11623
11624    if (result == VK_SUCCESS) {
11625        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
11626    }
11627
11628    return result;
11629}
11630
11631VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11632                                                                       uint32_t *pPresentModeCount,
11633                                                                       VkPresentModeKHR *pPresentModes) {
11634    bool skip_call = false;
11635    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11636    std::unique_lock<std::mutex> lock(global_lock);
11637    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
11638    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11639    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11640
11641    if (pPresentModes) {
11642        // Compare the preliminary value of *pPresentModeCount with the value this time:
11643        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11644        switch (call_state) {
11645            case UNCALLED:
11646                skip_call |= log_msg(
11647                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11648                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
11649                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes; but no prior call "
11650                    "with NULL pPresentModes has been made to query pPresentModeCount.");
11651                break;
11652            default:
11653                // QUERY_COUNT and QUERY_DETAILS: verify the count passed in matches the one previously returned
11654                if (*pPresentModeCount != prev_mode_count) {
11655                    skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11656                                         VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11657                                         reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11658                                         "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) "
11659                                         "that differs from the value (%u) "
11660                                         "that was returned when pPresentModes was NULL.",
11661                                         *pPresentModeCount, prev_mode_count);
11662                }
11663                break;
11664        }
11665    }
11666    lock.unlock();
11667
11668    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
11669
11670    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
11671                                                                                        pPresentModes);
11672
11673    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11674        lock.lock();
11675
11676        if (*pPresentModeCount) {
11677            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11678            if (*pPresentModeCount > physical_device_state->present_modes.size())
11679                physical_device_state->present_modes.resize(*pPresentModeCount);
11680        }
11681        if (pPresentModes) {
11682            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11683            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
11684                physical_device_state->present_modes[i] = pPresentModes[i];
11685            }
11686        }
11687    }
11688
11689    return result;
11690}
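
// Illustrative sketch of the two-call idiom expected above (hypothetical names); the same pattern
// applies to vkGetPhysicalDeviceSurfaceFormatsKHR below:
//
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());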
11691
11692VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11693                                                                  uint32_t *pSurfaceFormatCount,
11694                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
11695    bool skip_call = false;
11696    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11697    std::unique_lock<std::mutex> lock(global_lock);
11698    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11699    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
11700
11701    if (pSurfaceFormats) {
11702        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
11703
11704        switch (call_state) {
11705            case UNCALLED:
11706                // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means the
11707                // application didn't previously call this function with a NULL value of pSurfaceFormats
11708                // to query the count:
11709                skip_call |= log_msg(
11710                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11711                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
11712                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats; but no prior call "
11713                    "with NULL pSurfaceFormats has been made to query pSurfaceFormatCount.");
11714                break;
11715            default:
11716                if (prev_format_count != *pSurfaceFormatCount) {
11717                    skip_call |= log_msg(
11718                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11719                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, reinterpret_cast<uint64_t>(physicalDevice), __LINE__,
11720                        DEVLIMITS_COUNT_MISMATCH, "DL",
11721                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats "
11722                        "and a *pSurfaceFormatCount (%u) "
11723                        "that differs from the value (%u) "
11724                        "that was returned when pSurfaceFormats was NULL.",
11725                        *pSurfaceFormatCount, prev_format_count);
11726                }
11727                break;
11728        }
11729    }
11730    lock.unlock();
11731
11732    if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT;
11733
11734    // Call down the call chain:
11735    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
11736                                                                                   pSurfaceFormats);
11737
11738    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11739        lock.lock();
11740
11741        if (*pSurfaceFormatCount) {
11742            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11743            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
11744                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
11745        }
11746        if (pSurfaceFormats) {
11747            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11748            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11749                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
11750            }
11751        }
11752    }
11753    return result;
11754}
11755
11756VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
11757                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11758                                                            const VkAllocationCallbacks *pAllocator,
11759                                                            VkDebugReportCallbackEXT *pMsgCallback) {
11760    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11761    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11762    if (VK_SUCCESS == res) {
11763        std::lock_guard<std::mutex> lock(global_lock);
11764        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11765    }
11766    return res;
11767}
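
// Illustrative sketch of registering a callback through the entry point above (my_debug_callback is a
// hypothetical PFN_vkDebugReportCallbackEXT):
//
//     VkDebugReportCallbackCreateInfoEXT info = {};
//     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
//     info.pfnCallback = my_debug_callback;
//     VkDebugReportCallbackEXT callback;
//     vkCreateDebugReportCallbackEXT(instance, &info, nullptr, &callback);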
11768
11769VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
11770                                                         const VkAllocationCallbacks *pAllocator) {
11771    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11772    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11773    std::lock_guard<std::mutex> lock(global_lock);
11774    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
11775}
11776
11777VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
11778                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
11779                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11780    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11781    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11782}
11783
11784VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11785    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11786}
11787
11788VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11789                                                              VkLayerProperties *pProperties) {
11790    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11791}
11792
11793VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
11794                                                                    VkExtensionProperties *pProperties) {
11795    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11796        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11797
11798    return VK_ERROR_LAYER_NOT_PRESENT;
11799}
11800
11801VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
11802                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
11803    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);
11804
11805    assert(physicalDevice);
11806
11807    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11808    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
11809}
11810
11811static PFN_vkVoidFunction intercept_core_instance_command(const char *name);
11812
11813static PFN_vkVoidFunction intercept_core_device_command(const char *name);
11814
11815static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev);
11816
11817static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance);
11818
11819static PFN_vkVoidFunction intercept_extension_instance_commands(const char *name, VkInstance instance);
11820
11821VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
11822    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
11823    if (proc) return proc;
11824
11825    assert(dev);
11826
11827    proc = intercept_khr_swapchain_command(funcName, dev);
11828    if (proc) return proc;
11829
11830    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(dev), layer_data_map);
11831
11832    auto &table = dev_data->dispatch_table;
11833    if (!table.GetDeviceProcAddr) return nullptr;
11834    return table.GetDeviceProcAddr(dev, funcName);
11835}
11836
11837VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
11838    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
11839    if (!proc) proc = intercept_core_device_command(funcName);
11840    if (!proc) proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
11841    if (!proc) proc = intercept_khr_surface_command(funcName, instance);
11842    if (proc) return proc;
11843
11844    assert(instance);
11845
11846    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11847    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
11848    if (proc) return proc;
11849
11850    proc = intercept_extension_instance_commands(funcName, instance);
11851    if (proc) return proc;
11852
11853    auto &table = instance_data->dispatch_table;
11854    if (!table.GetInstanceProcAddr) return nullptr;
11855    return table.GetInstanceProcAddr(instance, funcName);
11856}
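
// Illustrative note on the resolution order above: core instance commands are checked first, then core
// device commands, then swapchain and surface commands, then debug-report and extension commands;
// anything unrecognized is forwarded down the chain. A hypothetical lookup:
//
//     // auto pfn = reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
//     //     vkGetInstanceProcAddr(instance, "vkEnumeratePhysicalDevices"));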
11857
11858VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
11859    assert(instance);
11860
11861    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11862
11863    auto &table = instance_data->dispatch_table;
11864    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
11865    return table.GetPhysicalDeviceProcAddr(instance, funcName);
11866}
11867
11868static PFN_vkVoidFunction intercept_core_instance_command(const char *name) {
11869    static const struct {
11870        const char *name;
11871        PFN_vkVoidFunction proc;
11872    } core_instance_commands[] = {
11873        {"vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr)},
11874        {"vk_layerGetPhysicalDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceProcAddr)},
11875        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
11876        {"vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance)},
11877        {"vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice)},
11878        {"vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices)},
11879        {"vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties)},
11880        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
11881        {"vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties)},
11882        {"vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties)},
11883        {"vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties)},
11884        {"vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties)},
11885    };
11886
11887    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
11888        if (!strcmp(core_instance_commands[i].name, name)) return core_instance_commands[i].proc;
11889    }
11890
11891    return nullptr;
11892}
11893
11894static PFN_vkVoidFunction intercept_core_device_command(const char *name) {
11895    static const struct {
11896        const char *name;
11897        PFN_vkVoidFunction proc;
11898    } core_device_commands[] = {
11899        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
11900        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
11901        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
11902        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
11903        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
11904        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
11905        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
11906        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
11907        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
11908        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
11909        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
11910        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
11911        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
11912        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
11913        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
11914        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
11915        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
11916        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
11917        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
11918        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
11919        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
11920        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
11921        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
11922        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
11923        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
11924        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
11925        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
11926        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
11927        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
11928        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
11929        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
11930        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
11931        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
11932        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
11933        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
11934        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
11935        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
11936        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
11937        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
11938        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
11939        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
11940        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
11941        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
11942        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
11943        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
11944        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
11945        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
11946        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
11947        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
11948        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
11949        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
11950        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
11951        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
11952        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
11953        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
11954        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
11955        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
11956        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
11957        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
11958        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
11959        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
11960        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
11961        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
11962        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
11963        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
11964        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
11965        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
11966        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
11967        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
11968        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
11969        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
11970        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
11971        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
11972        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
11973        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
11974        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
11975        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
11976        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
11977        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
11978        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
11979        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
11980        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
11981        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
11982        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
11983        {"vkGetImageSubresourceLayout", reinterpret_cast<PFN_vkVoidFunction>(GetImageSubresourceLayout)},
11984        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
11985        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
11986        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
11987        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
11988        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
11989        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
11990        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
11991        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
11992        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
11993        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
11994        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
11995        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
11996        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
11997        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
11998        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
11999        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
12000        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
12001        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
12002        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
12003        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
12004        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
12005        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
12006        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
12007        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
12008        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
12009        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
12010        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
12011        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
12012        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
12013        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
12014        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
12015        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
12016    };
12017
12018    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
12019        if (!strcmp(core_device_commands[i].name, name)) return core_device_commands[i].proc;
12020    }
12021
12022    return nullptr;
12023}
12024
12025static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev) {
12026    static const struct {
12027        const char *name;
12028        PFN_vkVoidFunction proc;
12029    } khr_swapchain_commands[] = {
12030        {"vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR)},
12031        {"vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR)},
12032        {"vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR)},
12033        {"vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR)},
12034        {"vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR)},
12035    };
12036    layer_data *dev_data = nullptr;
12037
12038    if (dev) {
12039        dev_data = GetLayerDataPtr(get_dispatch_key(dev), layer_data_map);
12040        if (!dev_data->device_extensions.wsi_enabled) return nullptr;
12041    }
12042
12043    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
12044        if (!strcmp(khr_swapchain_commands[i].name, name)) return khr_swapchain_commands[i].proc;
12045    }
12046
12047    if (dev_data) {
12048        if (!dev_data->device_extensions.wsi_display_swapchain_enabled) return nullptr;
12049    }
12050
12051    if (!strcmp("vkCreateSharedSwapchainsKHR", name)) return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);
12052
12053    return nullptr;
12054}
12055
12056static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance) {
12057    static const struct {
12058        const char *name;
12059        PFN_vkVoidFunction proc;
12060        bool instance_layer_data::*enable;
12061    } khr_surface_commands[] = {
12062#ifdef VK_USE_PLATFORM_ANDROID_KHR
12063        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
12064         &instance_layer_data::androidSurfaceExtensionEnabled},
12065#endif  // VK_USE_PLATFORM_ANDROID_KHR
12066#ifdef VK_USE_PLATFORM_MIR_KHR
12067        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
12068         &instance_layer_data::mirSurfaceExtensionEnabled},
12069#endif  // VK_USE_PLATFORM_MIR_KHR
12070#ifdef VK_USE_PLATFORM_WAYLAND_KHR
12071        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
12072         &instance_layer_data::waylandSurfaceExtensionEnabled},
12073#endif  // VK_USE_PLATFORM_WAYLAND_KHR
12074#ifdef VK_USE_PLATFORM_WIN32_KHR
12075        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
12076         &instance_layer_data::win32SurfaceExtensionEnabled},
12077#endif  // VK_USE_PLATFORM_WIN32_KHR
12078#ifdef VK_USE_PLATFORM_XCB_KHR
12079        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
12080         &instance_layer_data::xcbSurfaceExtensionEnabled},
12081#endif  // VK_USE_PLATFORM_XCB_KHR
12082#ifdef VK_USE_PLATFORM_XLIB_KHR
12083        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
12084         &instance_layer_data::xlibSurfaceExtensionEnabled},
12085#endif  // VK_USE_PLATFORM_XLIB_KHR
12086        {"vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
12087         &instance_layer_data::displayExtensionEnabled},
12088        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
12089         &instance_layer_data::surfaceExtensionEnabled},
12090        {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
12091         &instance_layer_data::surfaceExtensionEnabled},
12092        {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
12093         &instance_layer_data::surfaceExtensionEnabled},
12094        {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
12095         &instance_layer_data::surfaceExtensionEnabled},
12096        {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
12097         &instance_layer_data::surfaceExtensionEnabled},
12098    };
12099
12100    instance_layer_data *instance_data = nullptr;
12101    if (instance) {
12102        instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12103    }
12104
12105    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
12106        if (!strcmp(khr_surface_commands[i].name, name)) {
12107            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable))) return nullptr;
12108            return khr_surface_commands[i].proc;
12109        }
12110    }
12111
12112    return nullptr;
12113}
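
// Illustrative note: the table above pairs each command with a pointer-to-data-member
// (bool instance_layer_data::*enable) so one loop can gate every entry on its extension flag.
// A minimal sketch of the idiom, using a hypothetical struct:
//
//     // struct S { bool flag; };
//     // bool S::*pm = &S::flag;
//     // S s{true};
//     // bool enabled = s.*pm;  // reads s.flag through the member pointer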
12114
12115static PFN_vkVoidFunction intercept_extension_instance_commands(const char *name, VkInstance instance) {
12116    static const struct {
12117        const char *name;
12118        PFN_vkVoidFunction proc;
12119        bool instance_layer_data::*enable;
12120    } instance_extension_commands[] = {
12121        {"vkGetPhysicalDeviceQueueFamilyProperties2KHR",
12122         reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties2KHR)},
12123    };
12124
12125    for (size_t i = 0; i < ARRAY_SIZE(instance_extension_commands); i++) {
12126        if (!strcmp(instance_extension_commands[i].name, name)) {
12127            return instance_extension_commands[i].proc;
12128        }
12129    }
12130    return nullptr;
12131}
12132
12133}  // namespace core_validation
12134
12135// vk_layer_logging.h expects these to be defined
12136
12137VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(VkInstance instance,
12138                                                              const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
12139                                                              const VkAllocationCallbacks *pAllocator,
12140                                                              VkDebugReportCallbackEXT *pMsgCallback) {
12141    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
12142}
12143
12144VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
12145                                                           const VkAllocationCallbacks *pAllocator) {
12146    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
12147}
12148
12149VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
12150                                                   VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
12151                                                   int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
12152    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
12153}
12154
12155// loader-layer interface v0, just wrappers since there is only one layer
12156
12157VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
12158                                                                                      VkExtensionProperties *pProperties) {
12159    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
12160}
12161
12162VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
12163                                                                                  VkLayerProperties *pProperties) {
12164    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
12165}
12166
12167VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12168                                                                                VkLayerProperties *pProperties) {
12169    // the layer command handles VK_NULL_HANDLE just fine internally
12170    assert(physicalDevice == VK_NULL_HANDLE);
12171    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
12172}
12173
12174VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
12175                                                                                    const char *pLayerName, uint32_t *pCount,
12176                                                                                    VkExtensionProperties *pProperties) {
12177    // the layer command handles VK_NULL_HANDLE just fine internally
12178    assert(physicalDevice == VK_NULL_HANDLE);
12179    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
12180}
12181
12182VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
12183    return core_validation::GetDeviceProcAddr(dev, funcName);
12184}
12185
12186VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
12187    return core_validation::GetInstanceProcAddr(instance, funcName);
12188}
12189
12190VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
12191                                                                                           const char *funcName) {
12192    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
12193}
12194
12195VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
12196    assert(pVersionStruct != NULL);
12197    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
12198
12199    // Fill in the function pointers if our version is at least capable of having the structure contain them.
12200    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
12201        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
12202        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
12203        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
12204    }
12205
12206    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12207        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
12208    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12209        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
12210    }
12211
12212    return VK_SUCCESS;
12213}
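
// Illustrative sketch of the loader side of the negotiation above (hypothetical values): the loader
// proposes its interface version, and the layer lowers the struct's version if the loader's is newer
// than what the layer supports.
//
//     // VkNegotiateLayerInterface ver = {};
//     // ver.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
//     // ver.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
//     // vkNegotiateLoaderLayerInterfaceVersion(&ver);  // may reduce ver.loaderLayerInterfaceVersion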
12214