core_validation.cpp revision c54e405a4ce05f4de10bb78bfc3a4769c41d2d59
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
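// Both sentinels sit at the very top of the 64-bit handle range ((uint64_t)-1 and (uint64_t)-2),
// values a driver-allocated VkDeviceMemory handle is not expected to take, so either can be
// stored in the same field as a genuine binding, e.g.:
//     image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY; // image acquired from a swapchain
//     image_node->mem = MEMORY_UNBOUND;                  // previously bound memory was freed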

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    unique_ptr<INSTANCE_STATE> instance_state = nullptr;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    bool androidSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    bool mirSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    bool waylandSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    bool win32SurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    bool xcbSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    bool xlibSurfaceExtensionEnabled = false;
#endif
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;
    unique_ptr<INSTANCE_STATE> instance_state = nullptr;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_NODE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
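
// A usage sketch (names here are illustrative, not part of this layer): an enable order that
// satisfies the check above lists this layer before unique_objects, e.g.
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//     create_info.enabledLayerCount = 2;
//     create_info.ppEnabledLayerNames = layers;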

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
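
// Physical encoding note: each SPIR-V instruction begins with a single word whose high 16 bits
// hold the instruction's total word count (hence len() and the stride of operator++) and whose
// low 16 bits hold the opcode. Typical traversal, as used throughout this file:
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n)... */ }
//     }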

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
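
// The "+ 5" in begin() skips the fixed five-word SPIR-V module header (magic number, version,
// generator magic, id bound, schema); instructions start at word 5. def_index then lets
// get_def() jump straight to any <id>'s defining instruction instead of rescanning the stream.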

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

399
400// Return ptr to bound memory for given handle of specified type and set sparse param to indicate if binding is sparse
401static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type, bool *sparse) {
402    switch (type) {
403    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
404        auto img_node = getImageNode(my_data, VkImage(handle));
405        *sparse = img_node->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
406        if (img_node)
407            return &img_node->mem;
408        break;
409    }
410    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
411        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
412        *sparse = buff_node->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
413        if (buff_node)
414            return &buff_node->mem;
415        break;
416    }
417    default:
418        break;
419    }
420    return nullptr;
421}
422// Overloaded version of above function that doesn't care about sparse bool
423static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
424    bool sparse;
425    return GetObjectMemBinding(my_data, handle, type, &sparse);
426}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                                " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}
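
// Typical call via the wrappers below -- a sketch with a hypothetical node name: a source buffer
// for vkCmdCopyBuffer must have been created with VK_BUFFER_USAGE_TRANSFER_SRC_BIT, checked
// strictly (all desired bits required):
//     ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
//                              "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");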

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_node
//  If mem is special swapchain key, then verify that image_node valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_node->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_node to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_node->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_NODE *sampler_node) {
    sampler_node->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_node->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node) {
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        img_node->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_node = getImageNode(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_node) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_node);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_node = getBufferNode(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_node) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings. Clear any object bindings.
static bool ReportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skip_call = false;
    size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
    size_t objRefCount = pMemObjInfo->obj_bindings.size();

    if ((cmdBufRefCount + objRefCount) != 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                            " references",
                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->command_buffer_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->command_buffer_bindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->obj_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
            // Clear mem binding for bound objects
            switch (obj.type) {
            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
                auto image_node = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
                assert(image_node); // Any destroyed images should already be removed from bindings
                image_node->mem = MEMORY_UNBOUND;
                break;
            }
            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
                auto buff_node = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
                assert(buff_node); // Any destroyed buffers should already be removed from bindings
                buff_node->mem = MEMORY_UNBOUND;
                break;
            }
            default:
                // Should only have buffer or image objects bound to memory
                assert(0);
            }
        }
        // Clear the list of hanging references
        pMemObjInfo->obj_bindings.clear();
    }
    return skip_call;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        // TODO: Verify against Valid Use section
        // Clear any CB bindings for completed CBs
        //   TODO : Is there a better place to do this?

        assert(pInfo->object != VK_NULL_HANDLE);
        // clear_cmd_buf_and_mem_references removes elements from
        // pInfo->command_buffer_bindings -- this copy not needed in c++14,
        // and probably not needed in practice in c++11
        auto bindings = pInfo->command_buffer_bindings;
        for (auto cb : bindings) {
            if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                clear_cmd_buf_and_mem_references(dev_data, cb);
            }
        }
        // Now check for any remaining references to this mem obj and remove bindings
        if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
            skip_call |= ReportMemReferencesAndCleanUp(dev_data, pInfo);
        }
        // Delete mem obj info
        dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
    } else if (VK_NULL_HANDLE != mem) {
        // The request is to free an invalid, non-zero handle
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
                            reinterpret_cast<uint64_t &>(mem));
    }
    return skip_call;
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return the skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory().",
                         api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation.",
                         api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<const uint64_t &>(image_node->image),
                                          api_name, "Image");
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<const uint64_t &>(buffer_node->buffer),
                                          api_name, "Buffer");
    }
    return result;
}

// For NULL mem case, output error
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        bool sparse = false;
        VkDeviceMemory *mem_binding = GetObjectMemBinding(dev_data, handle, type, &sparse);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, *mem_binding);
            if (prev_binding) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if ((*mem_binding == MEMORY_UNBOUND) && (!sparse)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *mem_binding = mem;
            }
        }
    }
    return skip_call;
}
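
// Spec background for the errors above: for non-sparse resources the spec allows
// vkBindBufferMemory/vkBindImageMemory to be called at most once per object, and a binding
// cannot be replaced after the original memory is freed, so both the rebind and the
// bind-after-free paths report errors instead of updating state.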

// For NULL mem case, clear any previous binding; else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if we should skip the downstream API call, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skip_call;
}
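
// Unlike SetMemBinding above, no rebind error is raised here: sparse resources are bound via
// vkQueueBindSparse and may legitimately have memory bound, unbound, and rebound over their
// lifetime, so a prior binding is simply replaced (or cleared in the NULL case).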

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(dev_data, VkImage(handle));
        if (img_node)
            *mem = img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(dev_data, VkBuffer(handle));
        if (buff_node)
            *mem = buff_node->mem;
        break;
    }
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->command_buffer_bindings.size());
        if (mem_info->command_buffer_bindings.size() > 0) {
            for (auto cb : mem_info->command_buffer_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}
1144
1145// SPIRV utility functions
1146static void build_def_index(shader_module *module) {
1147    for (auto insn : *module) {
1148        switch (insn.opcode()) {
1149        /* Types */
1150        case spv::OpTypeVoid:
1151        case spv::OpTypeBool:
1152        case spv::OpTypeInt:
1153        case spv::OpTypeFloat:
1154        case spv::OpTypeVector:
1155        case spv::OpTypeMatrix:
1156        case spv::OpTypeImage:
1157        case spv::OpTypeSampler:
1158        case spv::OpTypeSampledImage:
1159        case spv::OpTypeArray:
1160        case spv::OpTypeRuntimeArray:
1161        case spv::OpTypeStruct:
1162        case spv::OpTypeOpaque:
1163        case spv::OpTypePointer:
1164        case spv::OpTypeFunction:
1165        case spv::OpTypeEvent:
1166        case spv::OpTypeDeviceEvent:
1167        case spv::OpTypeReserveId:
1168        case spv::OpTypeQueue:
1169        case spv::OpTypePipe:
1170            module->def_index[insn.word(1)] = insn.offset();
1171            break;
1172
1173        /* Fixed constants */
1174        case spv::OpConstantTrue:
1175        case spv::OpConstantFalse:
1176        case spv::OpConstant:
1177        case spv::OpConstantComposite:
1178        case spv::OpConstantSampler:
1179        case spv::OpConstantNull:
1180            module->def_index[insn.word(2)] = insn.offset();
1181            break;
1182
1183        /* Specialization constants */
1184        case spv::OpSpecConstantTrue:
1185        case spv::OpSpecConstantFalse:
1186        case spv::OpSpecConstant:
1187        case spv::OpSpecConstantComposite:
1188        case spv::OpSpecConstantOp:
1189            module->def_index[insn.word(2)] = insn.offset();
1190            break;
1191
1192        /* Variables */
1193        case spv::OpVariable:
1194            module->def_index[insn.word(2)] = insn.offset();
1195            break;
1196
1197        /* Functions */
1198        case spv::OpFunction:
1199            module->def_index[insn.word(2)] = insn.offset();
1200            break;
1201
1202        default:
1203            /* We don't care about any other defs for now. */
1204            break;
1205        }
1206    }
1207}
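
// Illustrative sketch (not part of the layer): the pass above leans on
// shader_module's instruction iterator, but the same index can be built over a
// raw SPIR-V word stream. The header is 5 words; each instruction packs its
// word count and opcode into its first word. Shown here for OpVariable only.
static std::map<uint32_t, size_t> build_def_index_raw(std::vector<uint32_t> const &words) {
    std::map<uint32_t, size_t> index;
    size_t offset = 5; /* skip the 5-word SPIR-V header */
    while (offset < words.size()) {
        uint32_t first = words[offset];
        uint32_t opcode = first & 0xffffu;
        uint32_t count = first >> 16;
        if (opcode == spv::OpVariable && count > 2) {
            index[words[offset + 2]] = offset; /* word 2 of OpVariable is its result id */
        }
        offset += count ? count : 1; /* guard against a malformed zero word count */
    }
    return index;
}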
1208
1209static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1210    for (auto insn : *src) {
1211        if (insn.opcode() == spv::OpEntryPoint) {
1212            auto entrypointName = (char const *)&insn.word(3);
1213            auto entrypointStageBits = 1u << insn.word(1);
1214
1215            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1216                return insn;
1217            }
1218        }
1219    }
1220
1221    return src->end();
1222}
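
// A note on the stage-bit trick above: for the six core execution models the
// VkShaderStageFlagBits bit position equals the SPIR-V ExecutionModel value
// (Vertex=0 -> 0x1, TessellationControl=1 -> 0x2, ..., GLCompute=5 -> 0x20),
// which is why `1u << insn.word(1)` lines up. Sketch of the mapping (assumes
// one of the core models; illustrative only):
static VkShaderStageFlagBits execution_model_to_stage_bit(spv::ExecutionModel model) {
    return static_cast<VkShaderStageFlagBits>(1u << static_cast<uint32_t>(model));
}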
1223
1224static char const *storage_class_name(unsigned sc) {
1225    switch (sc) {
1226    case spv::StorageClassInput:
1227        return "input";
1228    case spv::StorageClassOutput:
1229        return "output";
1230    case spv::StorageClassUniformConstant:
1231        return "const uniform";
1232    case spv::StorageClassUniform:
1233        return "uniform";
1234    case spv::StorageClassWorkgroup:
1235        return "workgroup local";
1236    case spv::StorageClassCrossWorkgroup:
1237        return "workgroup global";
1238    case spv::StorageClassPrivate:
1239        return "private global";
1240    case spv::StorageClassFunction:
1241        return "function";
1242    case spv::StorageClassGeneric:
1243        return "generic";
1244    case spv::StorageClassAtomicCounter:
1245        return "atomic counter";
1246    case spv::StorageClassImage:
1247        return "image";
1248    case spv::StorageClassPushConstant:
1249        return "push constant";
1250    default:
1251        return "unknown";
1252    }
1253}
1254
1255/* get the value of an integral constant */
1256unsigned get_constant_value(shader_module const *src, unsigned id) {
1257    auto value = src->get_def(id);
1258    assert(value != src->end());
1259
1260    if (value.opcode() != spv::OpConstant) {
1261        /* TODO: Either ensure that the specialization transform has already been performed on any
1262         * module we consider here, OR specialize on the fly now.
1263         */
1264        return 1;
1265    }
1266
1267    return value.word(3);
1268}
1269
1270
1271static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1272    auto insn = src->get_def(type);
1273    assert(insn != src->end());
1274
1275    switch (insn.opcode()) {
1276    case spv::OpTypeBool:
1277        ss << "bool";
1278        break;
1279    case spv::OpTypeInt:
1280        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1281        break;
1282    case spv::OpTypeFloat:
1283        ss << "float" << insn.word(2);
1284        break;
1285    case spv::OpTypeVector:
1286        ss << "vec" << insn.word(3) << " of ";
1287        describe_type_inner(ss, src, insn.word(2));
1288        break;
1289    case spv::OpTypeMatrix:
1290        ss << "mat" << insn.word(3) << " of ";
1291        describe_type_inner(ss, src, insn.word(2));
1292        break;
1293    case spv::OpTypeArray:
1294        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1295        describe_type_inner(ss, src, insn.word(2));
1296        break;
1297    case spv::OpTypePointer:
1298        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1299        describe_type_inner(ss, src, insn.word(3));
1300        break;
1301    case spv::OpTypeStruct: {
1302        ss << "struct of (";
1303        for (unsigned i = 2; i < insn.len(); i++) {
1304            describe_type_inner(ss, src, insn.word(i));
1305            if (i != insn.len() - 1) {
1306                ss << ", ";
1307            }
1308        }
1309        /* close outside the loop so a memberless struct still prints ")" */
1310        ss << ")";
1311        break;
1312    }
1313    case spv::OpTypeSampler:
1314        ss << "sampler";
1315        break;
1316    case spv::OpTypeSampledImage:
1317        ss << "sampler+";
1318        describe_type_inner(ss, src, insn.word(2));
1319        break;
1320    case spv::OpTypeImage:
1321        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1322        break;
1323    default:
1324        ss << "oddtype";
1325        break;
1326    }
1327}
1328
1329
1330static std::string describe_type(shader_module const *src, unsigned type) {
1331    std::ostringstream ss;
1332    describe_type_inner(ss, src, type);
1333    return ss.str();
1334}
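
// Usage sketch (hypothetical module and id; disabled, illustration only): the
// returned string feeds the interface-mismatch diagnostics below -- e.g. for
// `%p = OpTypePointer Input %v4float` it reads "ptr to input vec4 of float32".
#if 0
    std::string desc = describe_type(module, pointer_type_id);
#endif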
1335
1336
1337// "Narrow" here means a scalar int or float less than 64 bits wide.
1338static bool is_narrow_numeric_type(spirv_inst_iter type) {
1339    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
1340        return false;
1341    return type.word(2) < 64;
1342}
1343
1344
1345static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1346    /* walk two type trees together, and complain about differences */
1347    auto a_insn = a->get_def(a_type);
1348    auto b_insn = b->get_def(b_type);
1349    assert(a_insn != a->end());
1350    assert(b_insn != b->end());
1351
1352    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1353        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1354    }
1355
1356    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1357        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1358        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1359    }
1360
1361    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1362        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1363    }
1364
1365    if (a_insn.opcode() != b_insn.opcode()) {
1366        return false;
1367    }
1368
1369    if (a_insn.opcode() == spv::OpTypePointer) {
1370        /* match on pointee type. storage class is expected to differ */
1371        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1372    }
1373
1374    if (a_arrayed || b_arrayed) {
1375        /* if we haven't resolved array-of-verts by here, we're not going to. */
1376        return false;
1377    }
1378
1379    switch (a_insn.opcode()) {
1380    case spv::OpTypeBool:
1381        return true;
1382    case spv::OpTypeInt:
1383        /* match on width, signedness */
1384        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1385    case spv::OpTypeFloat:
1386        /* match on width */
1387        return a_insn.word(2) == b_insn.word(2);
1388    case spv::OpTypeVector:
1389        /* match on element type, count. */
1390        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1391            return false;
1392        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1393            /* relaxed: the producer may provide more components than the consumer reads */
1394            return a_insn.word(3) >= b_insn.word(3);
1395        } else {
1396            return a_insn.word(3) == b_insn.word(3);
1397        }
1398    case spv::OpTypeMatrix:
1399        /* match on element type, count. */
1400        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1401    case spv::OpTypeArray:
1402        /* match on element type, count. these all have the same layout. we don't get here if
1403         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1404         * not a literal within OpTypeArray */
1405        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1406               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1407    case spv::OpTypeStruct:
1408        /* match on all element types */
1409        {
1410            if (a_insn.len() != b_insn.len()) {
1411                return false; /* structs cannot match if member counts differ */
1412            }
1413
1414            for (unsigned i = 2; i < a_insn.len(); i++) {
1415                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1416                    return false;
1417                }
1418            }
1419
1420            return true;
1421        }
1422    default:
1423        /* remaining types are CLisms, or may not appear in the interfaces we
1424         * are interested in. Just claim no match.
1425         */
1426        return false;
1427    }
1428}
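
// The relaxed path above reduces to one rule: a producer vector may carry more
// components than the consumer reads, provided the scalar type is narrow
// (< 64 bits); otherwise the counts must agree exactly. Distilled predicate
// (illustrative only; mirrors the OpTypeVector case):
static bool relaxed_component_count_ok(unsigned producer_count, unsigned consumer_count, unsigned scalar_bits) {
    if (scalar_bits < 64) /* same test as is_narrow_numeric_type */
        return producer_count >= consumer_count;
    return producer_count == consumer_count;
}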
1429
1430static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1431    auto it = map.find(id);
1432    if (it == map.end())
1433        return def;
1434    else
1435        return it->second;
1436}
1437
1438static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1439    auto insn = src->get_def(type);
1440    assert(insn != src->end());
1441
1442    switch (insn.opcode()) {
1443    case spv::OpTypePointer:
1444        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1445         * we're never actually passing pointers around. */
1446        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1447    case spv::OpTypeArray:
1448        if (strip_array_level) {
1449            return get_locations_consumed_by_type(src, insn.word(2), false);
1450        } else {
1451            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1452        }
1453    case spv::OpTypeMatrix:
1454        /* num locations is the dimension * element size */
1455        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1456    case spv::OpTypeVector: {
1457        auto scalar_type = src->get_def(insn.word(2));
1458        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1459            scalar_type.word(2) : 32;
1460
1461        /* locations are 128 bits wide; 3- and 4-component vectors of 64-bit
1462         * types require two. */
1463        return (bit_width * insn.word(3) + 127) / 128;
1464    }
1465    default:
1466        /* everything else is just 1.
1467         *
1468         * TODO: extend to handle 64-bit scalar types, whose vectors may
1469         * need multiple locations. */
1470        return 1;
1471    }
1472}
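
// Worked instances of the 128-bit location rule above:
//   vec4 of float32: (32*4 + 127)/128 = 1 location
//   vec3 of float64: (64*3 + 127)/128 = 2 locations
static_assert((32u * 4u + 127u) / 128u == 1u, "32-bit vec4 occupies one location");
static_assert((64u * 3u + 127u) / 128u == 2u, "64-bit vec3 occupies two locations");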
1473
1474static unsigned get_locations_consumed_by_format(VkFormat format) {
1475    switch (format) {
1476    case VK_FORMAT_R64G64B64A64_SFLOAT:
1477    case VK_FORMAT_R64G64B64A64_SINT:
1478    case VK_FORMAT_R64G64B64A64_UINT:
1479    case VK_FORMAT_R64G64B64_SFLOAT:
1480    case VK_FORMAT_R64G64B64_SINT:
1481    case VK_FORMAT_R64G64B64_UINT:
1482        return 2;
1483    default:
1484        return 1;
1485    }
1486}
1487
1488typedef std::pair<unsigned, unsigned> location_t;
1489typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1490
1491struct interface_var {
1492    uint32_t id;
1493    uint32_t type_id;
1494    uint32_t offset;
1495    bool is_patch;
1496    bool is_block_member;
1497    /* TODO: collect the name, too? Isn't required to be present. */
1498};
1499
1500struct shader_stage_attributes {
1501    char const *const name;
1502    bool arrayed_input;
1503    bool arrayed_output;
1504};
1505
1506static shader_stage_attributes shader_stage_attribs[] = {
1507    {"vertex shader", false, false},
1508    {"tessellation control shader", true, true},
1509    {"tessellation evaluation shader", true, false},
1510    {"geometry shader", true, false},
1511    {"fragment shader", false, false},
1512};
1513
1514static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1515    while (true) {
1516
1517        if (def.opcode() == spv::OpTypePointer) {
1518            def = src->get_def(def.word(3));
1519        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1520            def = src->get_def(def.word(2));
1521            is_array_of_verts = false;
1522        } else if (def.opcode() == spv::OpTypeStruct) {
1523            return def;
1524        } else {
1525            return src->end();
1526        }
1527    }
1528}
1529
1530static void collect_interface_block_members(shader_module const *src,
1531                                            std::map<location_t, interface_var> *out,
1532                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1533                                            uint32_t id, uint32_t type_id, bool is_patch) {
1534    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1535    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1536    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1537        /* this isn't an interface block. */
1538        return;
1539    }
1540
1541    std::unordered_map<unsigned, unsigned> member_components;
1542
1543    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1544    for (auto insn : *src) {
1545        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1546            unsigned member_index = insn.word(2);
1547
1548            if (insn.word(3) == spv::DecorationComponent) {
1549                unsigned component = insn.word(4);
1550                member_components[member_index] = component;
1551            }
1552        }
1553    }
1554
1555    /* Second pass -- produce the output, from Location decorations */
1556    for (auto insn : *src) {
1557        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1558            unsigned member_index = insn.word(2);
1559            unsigned member_type_id = type.word(2 + member_index);
1560
1561            if (insn.word(3) == spv::DecorationLocation) {
1562                unsigned location = insn.word(4);
1563                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1564                auto component_it = member_components.find(member_index);
1565                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1566
1567                for (unsigned int offset = 0; offset < num_locations; offset++) {
1568                    interface_var v;
1569                    v.id = id;
1570                    /* TODO: member index in interface_var too? */
1571                    v.type_id = member_type_id;
1572                    v.offset = offset;
1573                    v.is_patch = is_patch;
1574                    v.is_block_member = true;
1575                    (*out)[std::make_pair(location + offset, component)] = v;
1576                }
1577            }
1578        }
1579    }
1580}
1581
1582static std::map<location_t, interface_var> collect_interface_by_location(
1583        shader_module const *src, spirv_inst_iter entrypoint,
1584        spv::StorageClass sinterface /* "interface" is a macro on Windows */, bool is_array_of_verts) {
1585
1586    std::unordered_map<unsigned, unsigned> var_locations;
1587    std::unordered_map<unsigned, unsigned> var_builtins;
1588    std::unordered_map<unsigned, unsigned> var_components;
1589    std::unordered_map<unsigned, unsigned> blocks;
1590    std::unordered_map<unsigned, unsigned> var_patch;
1591
1592    for (auto insn : *src) {
1593
1594        /* We consider two interface models: SSO rendezvous-by-location, and
1595         * builtins. Complain about anything that fits neither model.
1596         */
1597        if (insn.opcode() == spv::OpDecorate) {
1598            if (insn.word(2) == spv::DecorationLocation) {
1599                var_locations[insn.word(1)] = insn.word(3);
1600            }
1601
1602            if (insn.word(2) == spv::DecorationBuiltIn) {
1603                var_builtins[insn.word(1)] = insn.word(3);
1604            }
1605
1606            if (insn.word(2) == spv::DecorationComponent) {
1607                var_components[insn.word(1)] = insn.word(3);
1608            }
1609
1610            if (insn.word(2) == spv::DecorationBlock) {
1611                blocks[insn.word(1)] = 1;
1612            }
1613
1614            if (insn.word(2) == spv::DecorationPatch) {
1615                var_patch[insn.word(1)] = 1;
1616            }
1617        }
1618    }
1619
1620    /* TODO: handle grouped decorations */
1621    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1622     * have the same location, and we DON'T want to clobber. */
1623
1624    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1625       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1626       the word to determine which word contains the terminator. */
1627    uint32_t word = 3;
1628    while (entrypoint.word(word) & 0xff000000u) {
1629        ++word;
1630    }
1631    ++word;
1632
1633    std::map<location_t, interface_var> out;
1634
1635    for (; word < entrypoint.len(); word++) {
1636        auto insn = src->get_def(entrypoint.word(word));
1637        assert(insn != src->end());
1638        assert(insn.opcode() == spv::OpVariable);
1639
1640        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1641            unsigned id = insn.word(2);
1642            unsigned type = insn.word(1);
1643
1644            int location = value_or_default(var_locations, id, -1);
1645            int builtin = value_or_default(var_builtins, id, -1);
1646            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK; defaults to 0 */
1647            bool is_patch = var_patch.find(id) != var_patch.end();
1648
1649            /* All variables and interface block members in the Input or Output storage classes
1650             * must be decorated with either a builtin or an explicit location.
1651             *
1652             * TODO: integrate the interface block support here. For now, don't complain --
1653             * a valid SPIRV module will only hit this path for the interface block case, as the
1654             * individual members of the type are decorated, rather than variable declarations.
1655             */
1656
1657            if (location != -1) {
1658                /* A user-defined interface variable, with a location. Where a variable
1659                 * occupied multiple locations, emit one result for each. */
1660                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1661                for (unsigned int offset = 0; offset < num_locations; offset++) {
1662                    interface_var v;
1663                    v.id = id;
1664                    v.type_id = type;
1665                    v.offset = offset;
1666                    v.is_patch = is_patch;
1667                    v.is_block_member = false;
1668                    out[std::make_pair(location + offset, component)] = v;
1669                }
1670            } else if (builtin == -1) {
1671                /* An interface block instance */
1672                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1673            }
1674        }
1675    }
1676
1677    return out;
1678}
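
// The name-skipping loop above relies on SPIR-V string packing: a literal
// string occupies ceil((strlen + 1) / 4) little-endian words, zero-padded, so
// the terminator sits in the first word whose high byte is zero. Equivalent
// count computed from a C string (illustrative only):
static uint32_t literal_string_word_count(char const *s) {
    return (static_cast<uint32_t>(strlen(s)) + 1u /* NUL */ + 3u) / 4u;
}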
1679
1680static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1681        debug_report_data *report_data, shader_module const *src,
1682        std::unordered_set<uint32_t> const &accessible_ids) {
1683
1684    std::vector<std::pair<uint32_t, interface_var>> out;
1685
1686    for (auto insn : *src) {
1687        if (insn.opcode() == spv::OpDecorate) {
1688            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1689                auto attachment_index = insn.word(3);
1690                auto id = insn.word(1);
1691
1692                if (accessible_ids.count(id)) {
1693                    auto def = src->get_def(id);
1694                    assert(def != src->end());
1695
1696                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1697                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1698                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1699                            interface_var v;
1700                            v.id = id;
1701                            v.type_id = def.word(1);
1702                            v.offset = offset;
1703                            v.is_patch = false;
1704                            v.is_block_member = false;
1705                            out.emplace_back(attachment_index + offset, v);
1706                        }
1707                    }
1708                }
1709            }
1710        }
1711    }
1712
1713    return out;
1714}
1715
1716static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1717        debug_report_data *report_data, shader_module const *src,
1718        std::unordered_set<uint32_t> const &accessible_ids) {
1719
1720    std::unordered_map<unsigned, unsigned> var_sets;
1721    std::unordered_map<unsigned, unsigned> var_bindings;
1722
1723    for (auto insn : *src) {
1724        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1725         * DecorationDescriptorSet and DecorationBinding.
1726         */
1727        if (insn.opcode() == spv::OpDecorate) {
1728            if (insn.word(2) == spv::DecorationDescriptorSet) {
1729                var_sets[insn.word(1)] = insn.word(3);
1730            }
1731
1732            if (insn.word(2) == spv::DecorationBinding) {
1733                var_bindings[insn.word(1)] = insn.word(3);
1734            }
1735        }
1736    }
1737
1738    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1739
1740    for (auto id : accessible_ids) {
1741        auto insn = src->get_def(id);
1742        assert(insn != src->end());
1743
1744        if (insn.opcode() == spv::OpVariable &&
1745            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1746            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1747            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1748
1749            interface_var v;
1750            v.id = insn.word(2);
1751            v.type_id = insn.word(1);
1752            v.offset = 0;
1753            v.is_patch = false;
1754            v.is_block_member = false;
1755            out.emplace_back(std::make_pair(set, binding), v);
1756        }
1757    }
1758
1759    return out;
1760}
1761
1762static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1763                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1764                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1765                                              shader_stage_attributes const *consumer_stage) {
1766    bool pass = true;
1767
1768    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1769    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1770
1771    auto a_it = outputs.begin();
1772    auto b_it = inputs.begin();
1773
1774    /* maps sorted by key (location); walk them together to find mismatches */
1775    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1776        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1777        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1778        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1779        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1780
1781        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1782            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1783                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1784                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1785                        a_first.second, consumer_stage->name)) {
1786                pass = false;
1787            }
1788            a_it++;
1789        } else if (a_at_end || a_first > b_first) {
1790            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1791                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1792                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1793                        producer_stage->name)) {
1794                pass = false;
1795            }
1796            b_it++;
1797        } else {
1798            // subtleties of arrayed interfaces:
1799            // - if is_patch, then the member is not arrayed, even though the interface may be.
1800            // - if is_block_member, then the extra array level of an arrayed interface is not
1801            //   expressed in the member type -- it's expressed in the block type.
1802            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1803                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1804                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1805                             true)) {
1806                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1807                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1808                            a_first.first, a_first.second,
1809                            describe_type(producer, a_it->second.type_id).c_str(),
1810                            describe_type(consumer, b_it->second.type_id).c_str())) {
1811                    pass = false;
1812                }
1813            }
1814            if (a_it->second.is_patch != b_it->second.is_patch) {
1815                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1816                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1817                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1818                            "per-%s in %s stage", a_first.first, a_first.second,
1819                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1820                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1821                    pass = false;
1822                }
1823            }
1824            a_it++;
1825            b_it++;
1826        }
1827    }
1828
1829    return pass;
1830}
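
// The loop above is a sorted-merge walk over two location-keyed maps; the same
// skeleton recurs in the VI-vs-VS and FS-vs-attachment checks below. The bare
// pattern, stripped of the logging (illustrative only):
template <typename Map, typename OnlyA, typename OnlyB, typename Both>
static void merge_walk(Map const &a, Map const &b, OnlyA only_a, OnlyB only_b, Both both) {
    auto ia = a.begin();
    auto ib = b.begin();
    while (ia != a.end() || ib != b.end()) {
        if (ib == b.end() || (ia != a.end() && ia->first < ib->first)) {
            only_a(*ia++); /* key present only in a */
        } else if (ia == a.end() || ib->first < ia->first) {
            only_b(*ib++); /* key present only in b */
        } else {
            both(*ia++, *ib++); /* keys match */
        }
    }
}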
1831
1832enum FORMAT_TYPE {
1833    FORMAT_TYPE_UNDEFINED,
1834    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1835    FORMAT_TYPE_SINT,
1836    FORMAT_TYPE_UINT,
1837};
1838
1839static unsigned get_format_type(VkFormat fmt) {
1840    switch (fmt) {
1841    case VK_FORMAT_UNDEFINED:
1842        return FORMAT_TYPE_UNDEFINED;
1843    case VK_FORMAT_R8_SINT:
1844    case VK_FORMAT_R8G8_SINT:
1845    case VK_FORMAT_R8G8B8_SINT:
1846    case VK_FORMAT_R8G8B8A8_SINT:
1847    case VK_FORMAT_R16_SINT:
1848    case VK_FORMAT_R16G16_SINT:
1849    case VK_FORMAT_R16G16B16_SINT:
1850    case VK_FORMAT_R16G16B16A16_SINT:
1851    case VK_FORMAT_R32_SINT:
1852    case VK_FORMAT_R32G32_SINT:
1853    case VK_FORMAT_R32G32B32_SINT:
1854    case VK_FORMAT_R32G32B32A32_SINT:
1855    case VK_FORMAT_R64_SINT:
1856    case VK_FORMAT_R64G64_SINT:
1857    case VK_FORMAT_R64G64B64_SINT:
1858    case VK_FORMAT_R64G64B64A64_SINT:
1859    case VK_FORMAT_B8G8R8_SINT:
1860    case VK_FORMAT_B8G8R8A8_SINT:
1861    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1862    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1863    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1864        return FORMAT_TYPE_SINT;
1865    case VK_FORMAT_R8_UINT:
1866    case VK_FORMAT_R8G8_UINT:
1867    case VK_FORMAT_R8G8B8_UINT:
1868    case VK_FORMAT_R8G8B8A8_UINT:
1869    case VK_FORMAT_R16_UINT:
1870    case VK_FORMAT_R16G16_UINT:
1871    case VK_FORMAT_R16G16B16_UINT:
1872    case VK_FORMAT_R16G16B16A16_UINT:
1873    case VK_FORMAT_R32_UINT:
1874    case VK_FORMAT_R32G32_UINT:
1875    case VK_FORMAT_R32G32B32_UINT:
1876    case VK_FORMAT_R32G32B32A32_UINT:
1877    case VK_FORMAT_R64_UINT:
1878    case VK_FORMAT_R64G64_UINT:
1879    case VK_FORMAT_R64G64B64_UINT:
1880    case VK_FORMAT_R64G64B64A64_UINT:
1881    case VK_FORMAT_B8G8R8_UINT:
1882    case VK_FORMAT_B8G8R8A8_UINT:
1883    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1884    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1885    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1886        return FORMAT_TYPE_UINT;
1887    default:
1888        return FORMAT_TYPE_FLOAT;
1889    }
1890}
1891
1892/* Characterizes a SPIR-V type appearing in an interface to a fixed-function
1893 * stage, for comparison against a VkFormat's characterization above. */
1894static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1895    auto insn = src->get_def(type);
1896    assert(insn != src->end());
1897
1898    switch (insn.opcode()) {
1899    case spv::OpTypeInt:
1900        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1901    case spv::OpTypeFloat:
1902        return FORMAT_TYPE_FLOAT;
1903    case spv::OpTypeVector:
1904        return get_fundamental_type(src, insn.word(2));
1905    case spv::OpTypeMatrix:
1906        return get_fundamental_type(src, insn.word(2));
1907    case spv::OpTypeArray:
1908        return get_fundamental_type(src, insn.word(2));
1909    case spv::OpTypePointer:
1910        return get_fundamental_type(src, insn.word(3));
1911    case spv::OpTypeImage:
1912        return get_fundamental_type(src, insn.word(2));
1913
1914    default:
1915        return FORMAT_TYPE_UNDEFINED;
1916    }
1917}
1918
1919static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1920    uint32_t bit_pos = u_ffs(stage);
1921    return bit_pos - 1;
1922}
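
// u_ffs returns the 1-based position of the lowest set bit, so e.g.
// VK_SHADER_STAGE_FRAGMENT_BIT (0x10) yields stage id 4, a valid index into
// shader_stage_attribs above. A portable equivalent of that semantic
// (illustrative only; assumes callers pass exactly one stage bit):
static uint32_t ffs_sketch(uint32_t v) {
    uint32_t pos = 1;
    while (v && !(v & 1u)) {
        v >>= 1u;
        ++pos;
    }
    return v ? pos : 0; /* 0 when no bit is set */
}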
1923
1924static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1925    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1926     * each binding should be specified only once.
1927     */
1928    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1929    bool pass = true;
1930
1931    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1932        auto desc = &vi->pVertexBindingDescriptions[i];
1933        auto &binding = bindings[desc->binding];
1934        if (binding) {
1935            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1936                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1937                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1938                pass = false;
1939            }
1940        } else {
1941            binding = desc;
1942        }
1943    }
1944
1945    return pass;
1946}
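
// Repro sketch (hypothetical values; disabled, illustration only): two
// descriptions that reuse binding 0 would trip the duplicate-binding error
// above.
#if 0
    VkVertexInputBindingDescription descs[2] = {
        {0 /* binding */, 16 /* stride */, VK_VERTEX_INPUT_RATE_VERTEX},
        {0 /* binding */, 32 /* stride */, VK_VERTEX_INPUT_RATE_INSTANCE}, /* duplicate binding 0 */
    };
#endif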
1947
1948static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1949                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1950    bool pass = true;
1951
1952    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1953
1954    /* Build index by location */
1955    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1956    if (vi) {
1957        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1958            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1959            for (auto j = 0u; j < num_locations; j++) {
1960                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1961            }
1962        }
1963    }
1964
1965    auto it_a = attribs.begin();
1966    auto it_b = inputs.begin();
1967    bool used = false;
1968
1969    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1970        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1971        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1972        auto a_first = a_at_end ? 0 : it_a->first;
1973        auto b_first = b_at_end ? 0 : it_b->first.first;
1974        if (!a_at_end && (b_at_end || a_first < b_first)) {
1975            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1976                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1977                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1978                pass = false;
1979            }
1980            used = false;
1981            it_a++;
1982        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1983            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1984                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d which is not provided",
1985                        b_first)) {
1986                pass = false;
1987            }
1988            it_b++;
1989        } else {
1990            unsigned attrib_type = get_format_type(it_a->second->format);
1991            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1992
1993            /* type checking */
1994            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1995                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1996                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1997                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1998                            string_VkFormat(it_a->second->format), a_first,
1999                            describe_type(vs, it_b->second.type_id).c_str())) {
2000                    pass = false;
2001                }
2002            }
2003
2004            /* OK! */
2005            used = true;
2006            it_b++;
2007        }
2008    }
2009
2010    return pass;
2011}
2012
2013static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
2014                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
2015                                                    uint32_t subpass_index) {
2016    std::map<uint32_t, VkFormat> color_attachments;
2017    auto subpass = rpci->pSubpasses[subpass_index];
2018    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
2019        uint32_t attachment = subpass.pColorAttachments[i].attachment;
2020        if (attachment == VK_ATTACHMENT_UNUSED)
2021            continue;
2022        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
2023            color_attachments[i] = rpci->pAttachments[attachment].format;
2024        }
2025    }
2026
2027    bool pass = true;
2028
2029    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
2030
2031    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
2032
2033    auto it_a = outputs.begin();
2034    auto it_b = color_attachments.begin();
2035
2036    /* Walk attachment list and outputs together */
2037
2038    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
2039        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
2040        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
2041
2042        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
2043            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2044                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
2045                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
2046                pass = false;
2047            }
2048            it_a++;
2049        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
2050            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2051                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
2052                pass = false;
2053            }
2054            it_b++;
2055        } else {
2056            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
2057            unsigned att_type = get_format_type(it_b->second);
2058
2059            /* type checking */
2060            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
2061                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2062                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
2063                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
2064                            string_VkFormat(it_b->second),
2065                            describe_type(fs, it_a->second.type_id).c_str())) {
2066                    pass = false;
2067                }
2068            }
2069
2070            /* OK! */
2071            it_a++;
2072            it_b++;
2073        }
2074    }
2075
2076    return pass;
2077}
2078
2079/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
2080 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
2081 * for example.
2082 * Note: we only explore the parts of the module that might actually contain ids we care about for the above analyses.
2083 *  - NOT the shader input/output interfaces.
2084 *
2085 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
2086 * converting parts of this to be generated from the machine-readable spec instead.
2087 */
2088static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
2089    std::unordered_set<uint32_t> ids;
2090    std::unordered_set<uint32_t> worklist;
2091    worklist.insert(entrypoint.word(2)); /* seed with the entrypoint's function id */
2092
2093    while (!worklist.empty()) {
2094        auto id_iter = worklist.begin();
2095        auto id = *id_iter;
2096        worklist.erase(id_iter);
2097
2098        auto insn = src->get_def(id);
2099        if (insn == src->end()) {
2100            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
2101             * across all kinds of things here that we may not care about. */
2102            continue;
2103        }
2104
2105        /* try to add to the output set */
2106        if (!ids.insert(id).second) {
2107            continue; /* if we already saw this id, we don't want to walk it again. */
2108        }
2109
2110        switch (insn.opcode()) {
2111        case spv::OpFunction:
2112            /* scan whole body of the function, enlisting anything interesting */
2113            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
2114                switch (insn.opcode()) {
2115                case spv::OpLoad:
2116                case spv::OpAtomicLoad:
2117                case spv::OpAtomicExchange:
2118                case spv::OpAtomicCompareExchange:
2119                case spv::OpAtomicCompareExchangeWeak:
2120                case spv::OpAtomicIIncrement:
2121                case spv::OpAtomicIDecrement:
2122                case spv::OpAtomicIAdd:
2123                case spv::OpAtomicISub:
2124                case spv::OpAtomicSMin:
2125                case spv::OpAtomicUMin:
2126                case spv::OpAtomicSMax:
2127                case spv::OpAtomicUMax:
2128                case spv::OpAtomicAnd:
2129                case spv::OpAtomicOr:
2130                case spv::OpAtomicXor:
2131                    worklist.insert(insn.word(3)); /* ptr */
2132                    break;
2133                case spv::OpStore:
2134                case spv::OpAtomicStore:
2135                    worklist.insert(insn.word(1)); /* ptr */
2136                    break;
2137                case spv::OpAccessChain:
2138                case spv::OpInBoundsAccessChain:
2139                    worklist.insert(insn.word(3)); /* base ptr */
2140                    break;
2141                case spv::OpSampledImage:
2142                case spv::OpImageSampleImplicitLod:
2143                case spv::OpImageSampleExplicitLod:
2144                case spv::OpImageSampleDrefImplicitLod:
2145                case spv::OpImageSampleDrefExplicitLod:
2146                case spv::OpImageSampleProjImplicitLod:
2147                case spv::OpImageSampleProjExplicitLod:
2148                case spv::OpImageSampleProjDrefImplicitLod:
2149                case spv::OpImageSampleProjDrefExplicitLod:
2150                case spv::OpImageFetch:
2151                case spv::OpImageGather:
2152                case spv::OpImageDrefGather:
2153                case spv::OpImageRead:
2154                case spv::OpImage:
2155                case spv::OpImageQueryFormat:
2156                case spv::OpImageQueryOrder:
2157                case spv::OpImageQuerySizeLod:
2158                case spv::OpImageQuerySize:
2159                case spv::OpImageQueryLod:
2160                case spv::OpImageQueryLevels:
2161                case spv::OpImageQuerySamples:
2162                case spv::OpImageSparseSampleImplicitLod:
2163                case spv::OpImageSparseSampleExplicitLod:
2164                case spv::OpImageSparseSampleDrefImplicitLod:
2165                case spv::OpImageSparseSampleDrefExplicitLod:
2166                case spv::OpImageSparseSampleProjImplicitLod:
2167                case spv::OpImageSparseSampleProjExplicitLod:
2168                case spv::OpImageSparseSampleProjDrefImplicitLod:
2169                case spv::OpImageSparseSampleProjDrefExplicitLod:
2170                case spv::OpImageSparseFetch:
2171                case spv::OpImageSparseGather:
2172                case spv::OpImageSparseDrefGather:
2173                case spv::OpImageTexelPointer:
2174                    worklist.insert(insn.word(3)); /* image or sampled image */
2175                    break;
2176                case spv::OpImageWrite:
2177                    worklist.insert(insn.word(1)); /* image -- operand order differs from the cases above */
2178                    break;
2179                case spv::OpFunctionCall:
2180                    for (uint32_t i = 3; i < insn.len(); i++) {
2181                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2182                    }
2183                    break;
2184
2185                case spv::OpExtInst:
2186                    for (uint32_t i = 5; i < insn.len(); i++) {
2187                        worklist.insert(insn.word(i)); /* operands to ext inst */
2188                    }
2189                    break;
2190                }
2191            }
2192            break;
2193        }
2194    }
2195
2196    return ids;
2197}
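
// mark_accessible_ids is a plain worklist reachability pass. The same pattern
// over an explicit edge list, stripped of the SPIR-V details (illustrative
// only):
static std::unordered_set<uint32_t> reachable_ids(std::unordered_map<uint32_t, std::vector<uint32_t>> const &edges,
                                                  uint32_t root) {
    std::unordered_set<uint32_t> seen;
    std::unordered_set<uint32_t> work;
    work.insert(root);
    while (!work.empty()) {
        uint32_t id = *work.begin();
        work.erase(work.begin());
        if (!seen.insert(id).second) continue; /* already visited */
        auto it = edges.find(id);
        if (it == edges.end()) continue; /* no outgoing references */
        for (uint32_t next : it->second) work.insert(next);
    }
    return seen;
}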
2198
2199static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2200                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2201                                                          shader_module const *src, spirv_inst_iter type,
2202                                                          VkShaderStageFlagBits stage) {
2203    bool pass = true;
2204
2205    /* strip off ptrs etc */
2206    type = get_struct_type(src, type, false);
2207    assert(type != src->end());
2208
2209    /* validate directly off the offsets. this isn't quite correct for arrays
2210     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2211     * sizes */
2212    for (auto insn : *src) {
2213        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2214
2215            if (insn.word(3) == spv::DecorationOffset) {
2216                unsigned offset = insn.word(4);
2217                auto size = 4; /* bytes; TODO: calculate this based on the type */
2218
2219                bool found_range = false;
2220                for (auto const &range : *push_constant_ranges) {
2221                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2222                        found_range = true;
2223
2224                        if ((range.stageFlags & stage) == 0) {
2225                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2226                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2227                                        "Push constant range covering variable starting at "
2228                                        "offset %u not accessible from stage %s",
2229                                        offset, string_VkShaderStageFlagBits(stage))) {
2230                                pass = false;
2231                            }
2232                        }
2233
2234                        break;
2235                    }
2236                }
2237
2238                if (!found_range) {
2239                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2240                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2241                                "Push constant range covering variable starting at "
2242                                "offset %u not declared in layout",
2243                                offset)) {
2244                        pass = false;
2245                    }
2246                }
2247            }
2248        }
2249    }
2250
2251    return pass;
2252}
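
// The containment test above, illustrated (hypothetical values; disabled): a
// member at offset 4 with the assumed 4-byte size is covered by a range
// spanning [0, 16), since range.offset (0) <= 4 and range.offset + range.size
// (16) >= 4 + 4.
#if 0
    VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT, 0 /* offset */, 16 /* size */};
#endif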
2253
2254static bool validate_push_constant_usage(debug_report_data *report_data,
2255                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2256                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2257    bool pass = true;
2258
2259    for (auto id : accessible_ids) {
2260        auto def_insn = src->get_def(id);
2261        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2262            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2263                                                                  src->get_def(def_insn.word(1)), stage);
2264        }
2265    }
2266
2267    return pass;
2268}
2269
2270// For the given pipelineLayout, verify that the set_layout_node at slot.first
2271//  has the requested binding at slot.second and return a ptr to that binding
2272static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2273
2274    if (!pipelineLayout)
2275        return nullptr;
2276
2277    if (slot.first >= pipelineLayout->set_layouts.size())
2278        return nullptr;
2279
2280    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2281}
2282
2283// The block of code below manages/tracks the Pipeline state that this layer cares about
2284
2285static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2286
2287// TODO : Should be tracking lastBound per commandBuffer and, when draws occur, report based on that cmd buffer's lastBound.
2288//   Then need to synchronize the accesses per cmd buffer so that while one thread reads state from a cmd buffer, updates
2289//   to that same cmd buffer by a separate thread are not changing state from underneath us.
2290// Track the last cmd buffer touched by this thread
2291
2292static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2293    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2294        if (pCB->drawCount[i])
2295            return true;
2296    }
2297    return false;
2298}
2299
2300// Check object status for selected flag state
2301static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2302                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2303    if (!(pNode->status & status_mask)) {
2304        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2305                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2306                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2307    }
2308    return false;
2309}
2310
2311// Retrieve pipeline node ptr for given pipeline object
2312static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
2313    auto it = my_data->pipelineMap.find(pipeline);
2314    if (it == my_data->pipelineMap.end()) {
2315        return nullptr;
2316    }
2317    return it->second;
2318}
2319
2320static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2321    auto it = my_data->renderPassMap.find(renderpass);
2322    if (it == my_data->renderPassMap.end()) {
2323        return nullptr;
2324    }
2325    return it->second.get();
2326}
2327
2328static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
2329    auto it = my_data->frameBufferMap.find(framebuffer);
2330    if (it == my_data->frameBufferMap.end()) {
2331        return nullptr;
2332    }
2333    return it->second.get();
2334}
2335
2336cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2337    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2338    if (it == my_data->descriptorSetLayoutMap.end()) {
2339        return nullptr;
2340    }
2341    return it->second;
2342}
2343
2344static PIPELINE_LAYOUT_NODE const *getPipelineStateLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2345    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2346    if (it == my_data->pipelineLayoutMap.end()) {
2347        return nullptr;
2348    }
2349    return &it->second;
2350}
2351
2352// Return true if for a given PSO, the given state enum is dynamic, else return false
2353static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
2354    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2355        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2356            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2357                return true;
2358        }
2359    }
2360    return false;
2361}
2362
2363// Validate state stored as flags at time of draw call
2364static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexedDraw) {
2365    bool result = false;
2366    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2367        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2368         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2369        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2370                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2371    }
2372    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2373        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2374        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2375                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2376    }
2377    if (pPipe->blendConstantsEnabled) {
2378        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2379                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2380    }
2381    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2382        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2383        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2384                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2385    }
2386    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2387        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2388        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2389                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2390        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2391                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2392        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2393                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2394    }
2395    if (indexedDraw) {
2396        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2397                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2398                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2399    }
2400    return result;
2401}
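
// The checks above share one scheme: the vkCmdSet* handlers set a CBStatusFlags
// bit and draw time verifies its presence. The idiom in isolation (names
// hypothetical, not the layer's real flags):
static bool status_bit_present(uint32_t status, uint32_t required_bit) {
    return (status & required_bit) != 0; /* validate_status logs when this is false */
}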
2402
2403// Verify attachment reference compatibility according to spec
2404//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array much match this
2405//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2406 //   to make sure that format and sample counts match.
2407//  If not, they are not compatible.
2408static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2409                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2410                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2411                                             const VkAttachmentDescription *pSecondaryAttachments) {
2412    // Check potential NULL cases first to avoid nullptr issues later
2413    if (pPrimary == nullptr) {
2414        if (pSecondary == nullptr) {
2415            return true;
2416        }
2417        return false;
2418    } else if (pSecondary == nullptr) {
2419        return false;
2420    }
2421    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2422        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2423            return true;
2424    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2425        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2426            return true;
2427    } else { // Format and sample count must match
2428        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2429            return true;
2430        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2431            return false;
2432        }
2433        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2434             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2435            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2436             pSecondaryAttachments[pSecondary[index].attachment].samples))
2437            return true;
2438    }
2439    // Format and sample counts didn't match
2440    return false;
2441}
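
// Worked example (documentation only; attachment values are hypothetical): index 1 falls past the
// end of the primary array, so it is treated as VK_ATTACHMENT_UNUSED and the secondary reference at
// index 1 must also be unused, while index 0 is compared through the description arrays, where a
// matching format and sample count means compatible.
#if 0
static void example_attachment_compatibility() {
    const VkAttachmentDescription desc = {0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
                                          VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_STORE,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                          VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    const VkAttachmentReference primary[1] = {{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}};
    const VkAttachmentReference secondary[2] = {{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
                                                {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_UNDEFINED}};
    assert(attachment_references_compatible(0, primary, 1, &desc, secondary, 2, &desc));  // formats/samples match
    assert(attachment_references_compatible(1, primary, 1, &desc, secondary, 2, &desc));  // missing vs. UNUSED is ok
}
#endif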
2442// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2443 // For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
2444static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2445                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2446    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2447        stringstream errorStr;
2448        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2449                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2450        errorMsg = errorStr.str();
2451        return false;
2452    }
2453    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2455        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2456        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2457        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2458        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2459        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2460            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2461                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2462                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2463                stringstream errorStr;
2464                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2465                errorMsg = errorStr.str();
2466                return false;
2467            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2468                                                         primaryColorCount, primaryRPCI->pAttachments,
2469                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2470                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2471                stringstream errorStr;
2472                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2473                errorMsg = errorStr.str();
2474                return false;
2475            }
2476        }
2477
2478        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2479                                              1, primaryRPCI->pAttachments,
2480                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2481                                              1, secondaryRPCI->pAttachments)) {
2482            stringstream errorStr;
2483            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2484            errorMsg = errorStr.str();
2485            return false;
2486        }
2487
2488        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2489        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2490        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2491        for (uint32_t i = 0; i < inputMax; ++i) {
2492            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2493                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2494                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2495                stringstream errorStr;
2496                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2497                errorMsg = errorStr.str();
2498                return false;
2499            }
2500        }
2501    }
2502    return true;
2503}
2504
2505// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2506// pipelineLayout[layoutIndex]
2507static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2508                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2509                                            string &errorMsg) {
2510    auto num_sets = pipeline_layout->set_layouts.size();
2511    if (layoutIndex >= num_sets) {
2512        stringstream errorStr;
2513        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2514                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2515                 << layoutIndex;
2516        errorMsg = errorStr.str();
2517        return false;
2518    }
2519    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2520    return pSet->IsCompatible(layout_node, &errorMsg);
2521}
2522
2523// Validate that data for each specialization entry is fully contained within the buffer.
2524static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2525    bool pass = true;
2526
2527    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2528
2529    if (spec) {
2530        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2531            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2532                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2533                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2534                            "Specialization entry %u (for constant id %u) references memory outside provided "
2535                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2536                            " bytes provided)",
2537                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2538                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2540                    pass = false;
2541                }
2542            }
2543        }
2544    }
2545
2546    return pass;
2547}
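
// Illustrative sketch (documentation only): a VkSpecializationInfo that passes the containment
// check above; every map entry must satisfy offset + size <= dataSize.
#if 0
static void example_specialization_info() {
    static const uint32_t spec_data[2] = {16, 1};            // two 4-byte constants, dataSize == 8
    static const VkSpecializationMapEntry entries[2] = {
        {0 /*constantID*/, 0 /*offset*/, sizeof(uint32_t)},  // occupies bytes 0..3
        {1 /*constantID*/, 4 /*offset*/, sizeof(uint32_t)},  // occupies bytes 4..7
    };
    VkSpecializationInfo spec = {2, entries, sizeof(spec_data), spec_data};
    (void)spec;  // an entry such as {2, 6, 4} would overrun the data (6 + 4 > 8) and be flagged above
}
#endif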
2548
2549static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2550                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2551    auto type = module->get_def(type_id);
2552
2553    descriptor_count = 1;
2554
2555    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2556     * descriptor count for each dimension. */
2557    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2558        if (type.opcode() == spv::OpTypeArray) {
2559            descriptor_count *= get_constant_value(module, type.word(3));
2560            type = module->get_def(type.word(2));
2561        }
2562        else {
2563            type = module->get_def(type.word(3));
2564        }
2565    }
2566
2567    switch (type.opcode()) {
2568    case spv::OpTypeStruct: {
2569        for (auto insn : *module) {
2570            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2571                if (insn.word(2) == spv::DecorationBlock) {
2572                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2573                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2574                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2575                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2576                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2577                }
2578            }
2579        }
2580
2581        /* Invalid */
2582        return false;
2583    }
2584
2585    case spv::OpTypeSampler:
2586        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2587            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2588
2589    case spv::OpTypeSampledImage:
2590        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2591            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2592             * doesn't really have a sampler, and a texel buffer descriptor
2593             * doesn't really provide one. Allow this slight mismatch.
2594             */
2595            auto image_type = module->get_def(type.word(2));
2596            auto dim = image_type.word(3);
2597            auto sampled = image_type.word(7);
2598            return dim == spv::DimBuffer && sampled == 1;
2599        }
2600        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2601
2602    case spv::OpTypeImage: {
2603        /* Many descriptor types can back an image type -- which one depends on the dimension
2604         * and whether the image will be used with a sampler. SPIR-V for
2605         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2606         * runtime is unacceptable.
2607         */
2608        auto dim = type.word(3);
2609        auto sampled = type.word(7);
2610
2611        if (dim == spv::DimSubpassData) {
2612            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2613        } else if (dim == spv::DimBuffer) {
2614            if (sampled == 1) {
2615                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2616            } else {
2617                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2618            }
2619        } else if (sampled == 1) {
2620            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2621                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2622        } else {
2623            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2624        }
2625    }
2626
2627    /* We shouldn't really see any other junk types -- but if we do, they're
2628     * a mismatch.
2629     */
2630    default:
2631        return false; /* Mismatch */
2632    }
2633}
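
// Quick reference (documentation only) for the mapping enforced above, in GLSL terms:
//   uniform UBO { ... }   (Block decoration)        -> UNIFORM_BUFFER or UNIFORM_BUFFER_DYNAMIC
//   buffer  SSBO { ... }  (BufferBlock decoration)  -> STORAGE_BUFFER or STORAGE_BUFFER_DYNAMIC
//   sampler               (OpTypeSampler)           -> SAMPLER or COMBINED_IMAGE_SAMPLER
//   sampler2D             (OpTypeSampledImage)      -> COMBINED_IMAGE_SAMPLER
//   samplerBuffer         (OpTypeSampledImage)      -> UNIFORM_TEXEL_BUFFER (the GLSL relaxation above)
//   texture2D             (OpTypeImage, sampled=1)  -> SAMPLED_IMAGE or COMBINED_IMAGE_SAMPLER
//   image2D               (OpTypeImage, sampled=2)  -> STORAGE_IMAGE
//   imageBuffer           (OpTypeImage, DimBuffer)  -> STORAGE_TEXEL_BUFFER
//   subpassInput          (DimSubpassData)          -> INPUT_ATTACHMENT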
2634
2635static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2636    if (!feature) {
2637        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2638                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2639                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2640                    "enabled on the device",
2641                    feature_name)) {
2642            return false;
2643        }
2644    }
2645
2646    return true;
2647}
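
// Illustrative sketch (documentation only; names are hypothetical): the application-side fix for
// the error above is to enable the feature at device creation, assuming the physical device
// actually reports support for it.
#if 0
static void example_enable_feature(VkPhysicalDevice gpu, VkDeviceCreateInfo *create_info) {
    VkPhysicalDeviceFeatures supported = {};
    vkGetPhysicalDeviceFeatures(gpu, &supported);
    static VkPhysicalDeviceFeatures enabled = {};
    if (supported.geometryShader) enabled.geometryShader = VK_TRUE;  // e.g. for spv::CapabilityGeometry
    create_info->pEnabledFeatures = &enabled;  // then pass create_info to vkCreateDevice()
}
#endif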
2648
2649static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2650                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2651    bool pass = true;
2652
2653
2655        if (insn.opcode() == spv::OpCapability) {
2656            switch (insn.word(1)) {
2657            case spv::CapabilityMatrix:
2658            case spv::CapabilityShader:
2659            case spv::CapabilityInputAttachment:
2660            case spv::CapabilitySampled1D:
2661            case spv::CapabilityImage1D:
2662            case spv::CapabilitySampledBuffer:
2663            case spv::CapabilityImageBuffer:
2664            case spv::CapabilityImageQuery:
2665            case spv::CapabilityDerivativeControl:
2666                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2667                break;
2668
2669            case spv::CapabilityGeometry:
2670                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2671                break;
2672
2673            case spv::CapabilityTessellation:
2674                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2675                break;
2676
2677            case spv::CapabilityFloat64:
2678                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2679                break;
2680
2681            case spv::CapabilityInt64:
2682                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2683                break;
2684
2685            case spv::CapabilityTessellationPointSize:
2686            case spv::CapabilityGeometryPointSize:
2687                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2688                                        "shaderTessellationAndGeometryPointSize");
2689                break;
2690
2691            case spv::CapabilityImageGatherExtended:
2692                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2693                break;
2694
2695            case spv::CapabilityStorageImageMultisample:
2696                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2697                break;
2698
2699            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2700                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2701                                        "shaderUniformBufferArrayDynamicIndexing");
2702                break;
2703
2704            case spv::CapabilitySampledImageArrayDynamicIndexing:
2705                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2706                                        "shaderSampledImageArrayDynamicIndexing");
2707                break;
2708
2709            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2710                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2711                                        "shaderStorageBufferArrayDynamicIndexing");
2712                break;
2713
2714            case spv::CapabilityStorageImageArrayDynamicIndexing:
2715                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2716                                        "shaderStorageImageArrayDynamicIndexing");
2717                break;
2718
2719            case spv::CapabilityClipDistance:
2720                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2721                break;
2722
2723            case spv::CapabilityCullDistance:
2724                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2725                break;
2726
2727            case spv::CapabilityImageCubeArray:
2728                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2729                break;
2730
2731            case spv::CapabilitySampleRateShading:
2732                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2733                break;
2734
2735            case spv::CapabilitySparseResidency:
2736                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2737                break;
2738
2739            case spv::CapabilityMinLod:
2740                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2741                break;
2742
2743            case spv::CapabilitySampledCubeArray:
2744                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2745                break;
2746
2747            case spv::CapabilityImageMSArray:
2748                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2749                break;
2750
2751            case spv::CapabilityStorageImageExtendedFormats:
2752                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2753                                        "shaderStorageImageExtendedFormats");
2754                break;
2755
2756            case spv::CapabilityInterpolationFunction:
2757                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2758                break;
2759
2760            case spv::CapabilityStorageImageReadWithoutFormat:
2761                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2762                                        "shaderStorageImageReadWithoutFormat");
2763                break;
2764
2765            case spv::CapabilityStorageImageWriteWithoutFormat:
2766                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2767                                        "shaderStorageImageWriteWithoutFormat");
2768                break;
2769
2770            case spv::CapabilityMultiViewport:
2771                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2772                break;
2773
2774            default:
2775                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2776                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2777                            "Shader declares capability %u, not supported in Vulkan.",
2778                            insn.word(1)))
2779                    pass = false;
2780                break;
2781            }
2782        }
2783    }
2784
2785    return pass;
2786}
2787
2789static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2790    auto type = module->get_def(type_id);
2791
2792    while (true) {
2793        switch (type.opcode()) {
2794        case spv::OpTypeArray:
2795        case spv::OpTypeSampledImage:
2796            type = module->get_def(type.word(2));
2797            break;
2798        case spv::OpTypePointer:
2799            type = module->get_def(type.word(3));
2800            break;
2801        case spv::OpTypeImage: {
2802            auto dim = type.word(3);
2803            auto arrayed = type.word(5);
2804            auto msaa = type.word(6);
2805
2806            switch (dim) {
2807            case spv::Dim1D:
2808                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2809            case spv::Dim2D:
2810                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2811                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2812            case spv::Dim3D:
2813                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2814            case spv::DimCube:
2815                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2816            case spv::DimSubpassData:
2817                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2818            default:  // buffer, etc.
2819                return 0;
2820            }
2821        }
2822        default:
2823            return 0;
2824        }
2825    }
2826}
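
// Example of how these requirement bits are consumed (documentation only): a shader variable
// declared as sampler2DArray yields DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY, so the descriptor bound to
// that slot must come from an image view created with viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY;
// binding a VK_IMAGE_VIEW_TYPE_2D view there is flagged at draw time via active_slots.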
2827
2828static bool
2829validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
2830                               PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
2831                               VkPhysicalDeviceFeatures const *enabledFeatures,
2832                               std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2833    bool pass = true;
2834    auto module_it = shaderModuleMap.find(pStage->module);
2835    auto module = *out_module = module_it->second.get();
2836
2837    /* find the entrypoint */
2838    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2839    if (entrypoint == module->end()) {
2840        log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2841                __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2842                "No entrypoint found named `%s` for stage %s", pStage->pName,
2843                string_VkShaderStageFlagBits(pStage->stage));
2844        // Return unconditionally: any analysis past an end() iterator is garbage, even when the
2845        // message above has been filtered out.
2846        return false;
    }
2847
2848    /* validate shader capabilities against enabled device features */
2849    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2850
2851    /* mark accessible ids */
2852    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2853
2854    /* validate descriptor set layout against what the entrypoint actually uses */
2855    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2856
2857    auto pipelineLayout = pipeline->pipeline_layout;
2858
2859    pass &= validate_specialization_offsets(report_data, pStage);
2860    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2861
2862    /* validate descriptor use */
2863    for (auto use : descriptor_uses) {
2864        // While validating shaders capture which slots are used by the pipeline
2865        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2866        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2867
2868        /* verify given pipelineLayout has requested setLayout with requested binding */
2869        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2870        unsigned required_descriptor_count;
2871
2872        if (!binding) {
2873            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2874                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2875                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2876                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2877                pass = false;
2878            }
2879        } else if (~binding->stageFlags & pStage->stage) {
2880            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2881                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2882                        "Shader uses descriptor slot %u.%u (used "
2883                        "as type `%s`) but descriptor not "
2884                        "accessible from stage %s",
2885                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2886                        string_VkShaderStageFlagBits(pStage->stage))) {
2887                pass = false;
2888            }
2889        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2890                                          /*out*/ required_descriptor_count)) {
2891            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2892                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2893                        "Type mismatch on descriptor slot %u.%u (used as type `%s`) but the bound descriptor "
2894                        "is of type %s",
2895                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2896                        string_VkDescriptorType(binding->descriptorType))) {
2897                pass = false;
2898            }
2899        } else if (binding->descriptorCount < required_descriptor_count) {
2900            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2901                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2902                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2903                        required_descriptor_count, use.first.first, use.first.second,
2904                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2905                pass = false;
2906            }
2907        }
2908    }
2909
2910    /* validate use of input attachments against subpass structure */
2911    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2912        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2913
2914        auto rpci = pipeline->render_pass_ci.ptr();
2915        auto subpass = pipeline->graphicsPipelineCI.subpass;
2916
2917        for (auto use : input_attachment_uses) {
2918            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2919            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2920                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2921
2922            if (index == VK_ATTACHMENT_UNUSED) {
2923                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2924                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2925                            "Shader consumes input attachment index %d but not provided in subpass",
2926                            use.first)) {
2927                    pass = false;
2928                }
2929            } else if (get_format_type(rpci->pAttachments[index].format) !=
2931                    get_fundamental_type(module, use.second.type_id)) {
2932                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2933                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2934                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2935                            use.first, string_VkFormat(rpci->pAttachments[index].format),
2936                            describe_type(module, use.second.type_id).c_str())) {
2937                    pass = false;
2938                }
2939            }
2940        }
2941    }
2942
2943    return pass;
2944}
2945
2946// Validate the shaders used by the given pipeline and store the set/binding slots
2947//  that are actually used by the pipeline into pPipeline->active_slots
2949static bool
2950validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2951                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2952                                           std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2953    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2954    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2955    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2956
2957    shader_module *shaders[5] = {};       // one slot per graphics stage
2958    spirv_inst_iter entrypoints[5] = {};  // value-initialize rather than memset a non-trivially-copyable type
2961    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2962    bool pass = true;
2963
2964    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2965        auto pStage = &pCreateInfo->pStages[i];
2966        auto stage_id = get_shader_stage_id(pStage->stage);
2967        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2968                                               &shaders[stage_id], &entrypoints[stage_id],
2969                                               enabledFeatures, shaderModuleMap);
2970    }
2971
2972    // if the shader stages are no good individually, cross-stage validation is pointless.
2973    if (!pass)
2974        return false;
2975
2976    vi = pCreateInfo->pVertexInputState;
2977
2978    if (vi) {
2979        pass &= validate_vi_consistency(report_data, vi);
2980    }
2981
2982    if (shaders[vertex_stage]) {
2983        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2984    }
2985
2986    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2987    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2988
2989    while (!shaders[producer] && producer != fragment_stage) {
2990        producer++;
2991        consumer++;
2992    }
2993
2994    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2995        assert(shaders[producer]);
2996        if (shaders[consumer]) {
2997            pass &= validate_interface_between_stages(report_data,
2998                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2999                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
3000
3001            producer = consumer;
3002        }
3003    }
3004
3005    if (shaders[fragment_stage]) {
3006        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
3007                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
3008    }
3009
3010    return pass;
3011}
3012
3013static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
3014                                      VkPhysicalDeviceFeatures const *enabledFeatures,
3015                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
3016    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
3017
3018    shader_module *module;
3019    spirv_inst_iter entrypoint;
3020
3021    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
3022                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
3023}
3024// Return Set node ptr for specified set or else NULL
3025cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
3026    auto set_it = my_data->setMap.find(set);
3027    if (set_it == my_data->setMap.end()) {
3028        return NULL;
3029    }
3030    return set_it->second;
3031}
3032// For the given command buffer, verify and update the state for activeSetBindingsPairs
3033//  This includes:
3034//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
3035//     To be valid, the dynamic offset combined with the offset and range from its
3036//     descriptor update must not overflow the size of its buffer being updated
3037//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
3038//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
3039static bool validate_and_update_drawtime_descriptor_state(
3040    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
3041    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
3042        &activeSetBindingsPairs,
3043    const char *function) {
3044    bool result = false;
3045    for (auto set_bindings_pair : activeSetBindingsPairs) {
3046        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
3047        std::string err_str;
3048        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
3049                                         &err_str)) {
3050            // Report error here
3051            auto set = set_node->GetSet();
3052            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3053                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3054                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
3055                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
3056        }
3057        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
3058    }
3059    return result;
3060}
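
// Worked example (documentation only; numbers are hypothetical) of the dynamic offset rule checked
// by ValidateDrawState() above: for a UNIFORM_BUFFER_DYNAMIC descriptor written with offset 256 and
// range 512 into a 1024-byte buffer, the dynamicOffset supplied at vkCmdBindDescriptorSets() time
// must satisfy 256 + 512 + dynamicOffset <= 1024, i.e. dynamicOffset may be at most 256.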
3061
3062// For given pipeline, return number of MSAA samples, or one if MSAA disabled
3063static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
3064    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
3065        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
3066        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
3067    }
3068    return VK_SAMPLE_COUNT_1_BIT;
3069}
3070
3071static void list_bits(std::ostream& s, uint32_t bits) {
3072    for (int i = 0; i < 32 && bits; i++) {
3073        if (bits & (1 << i)) {
3074            s << i;
3075            bits &= ~(1 << i);
3076            if (bits) {
3077                s << ",";
3078            }
3079        }
3080    }
3081}
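
// Usage note (documentation only): list_bits(s, 0x0A) appends "1,3" -- set bits are printed
// LSB-first as a comma-separated list, which is how the missing viewport/scissor masks below
// are reported.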
3082
3083// Validate draw-time state related to the PSO
3084static bool validatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
3085                                          PIPELINE_STATE const *pPipeline) {
3086    bool skip_call = false;
3087
3088    // Verify Vtx binding
3089    if (pPipeline->vertexBindingDescriptions.size() > 0) {
3090        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
3091            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
3092            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
3093                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
3094                skip_call |= log_msg(
3095                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3096                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3097                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
3098                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
3099                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
3100                    (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
3101            }
3102        }
3103    } else {
3104        if (!pCB->currentDrawData.buffers.empty()) {
3105            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
3106                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3107                                 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
3108                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
3109                                 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
3110        }
3111    }
3112    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3113    // Skip check if rasterization is disabled or there is no viewport.
3114    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
3115         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
3116        pPipeline->graphicsPipelineCI.pViewportState) {
3117        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3118        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3119
3120        if (dynViewport) {
3121            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
3122            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
3123            if (missingViewportMask) {
3124                std::stringstream ss;
3125                ss << "Dynamic viewport(s) ";
3126                list_bits(ss, missingViewportMask);
3127                ss << " are used by PSO, but were not provided via calls to vkCmdSetViewport().";
3128                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3129                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3130                                     "%s", ss.str().c_str());
3131            }
3132        }
3133
3134        if (dynScissor) {
3135            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
3136            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
3137            if (missingScissorMask) {
3138                std::stringstream ss;
3139                ss << "Dynamic scissor(s) ";
3140                list_bits(ss, missingScissorMask);
3141                ss << " are used by PSO, but were not provided via calls to vkCmdSetScissor().";
3142                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3143                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3144                                     "%s", ss.str().c_str());
3145            }
3146        }
3147    }
3148
3149    // Verify that any MSAA request in PSO matches sample# in bound FB
3150    // Skip the check if rasterization is disabled.
3151    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3152        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3153        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
3154        if (pCB->activeRenderPass) {
3155            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
3156            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
3157            uint32_t i;
3158
3159            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3160            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3161                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
3162                skip_call |=
3163                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3164                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3165                                "Render pass subpass %u mismatch with blending state defined and blend state attachment "
3166                                "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
3167                                "must be the same at draw-time.",
3168                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
3169                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3170            }
3171
3172            unsigned subpass_num_samples = 0;
3173
3174            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
3175                auto attachment = subpass_desc->pColorAttachments[i].attachment;
3176                if (attachment != VK_ATTACHMENT_UNUSED)
3177                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3178            }
3179
3180            if (subpass_desc->pDepthStencilAttachment &&
3181                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3182                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
3183                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3184            }
3185
3186            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
3187                skip_call |=
3188                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3189                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3190                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
3191                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
3192                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
3193                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
3194            }
3195        } else {
3196            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3197                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3198                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
3199                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3200        }
3201    }
3202    // Verify that PSO creation renderPass is compatible with active renderPass
3203    if (pCB->activeRenderPass) {
3204        std::string err_string;
3205        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
3206            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
3207                                             err_string)) {
3208            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
3209            skip_call |=
3210                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3211                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3212                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
3213                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
3214                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
3215                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
3216        }
3217
3218        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
3219            skip_call |=
3220                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3221                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3222                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
3223                        pCB->activeSubpass);
3224        }
3225    }
3226    // TODO : Add more checks here
3227
3228    return skip_call;
3229}
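
// Illustrative sketch (documentation only; handles are hypothetical): satisfying the dynamic
// viewport/scissor check above for a PSO created with viewportCount == 2 and scissorCount == 2.
#if 0
static void example_set_dynamic_viewports(VkCommandBuffer cb) {
    const VkViewport viewports[2] = {{0.0f, 0.0f, 640.0f, 480.0f, 0.0f, 1.0f},
                                     {640.0f, 0.0f, 640.0f, 480.0f, 0.0f, 1.0f}};
    const VkRect2D scissors[2] = {{{0, 0}, {640, 480}}, {{640, 0}, {640, 480}}};
    vkCmdSetViewport(cb, 0, 2, viewports);  // sets bits 0 and 1 of pCB->viewportMask
    vkCmdSetScissor(cb, 0, 2, scissors);    // sets bits 0 and 1 of pCB->scissorMask
}
#endif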
3230
3231// Validate overall state at the time of a draw call
3232static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
3233                                           const VkPipelineBindPoint bindPoint, const char *function) {
3234    bool result = false;
3235    auto const &state = cb_node->lastBound[bindPoint];
3236    PIPELINE_STATE *pPipe = state.pipeline_state;
3237    if (nullptr == pPipe) {
3238        result |= log_msg(
3239            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3240            DRAWSTATE_INVALID_PIPELINE, "DS",
3241            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3242        // Return unconditionally: every check below dereferences pPipe, so continuing without a
3243        // bound pipeline would crash even when the message above is filtered and 'result' is false.
3244        return result;
3245    }
3246    // First check flag states
3247    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3248        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);
3249
3250    // Now complete other state checks
3251    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3252        string errorString;
3253        auto pipeline_layout = pPipe->pipeline_layout;
3254
3255        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
3256        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
3257            activeSetBindingsPairs;
3258        for (auto & setBindingPair : pPipe->active_slots) {
3259            uint32_t setIndex = setBindingPair.first;
3260            // If valid set is not bound throw an error
3261            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3262                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3263                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3264                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3265                                  setIndex);
3266            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3267                                                        errorString)) {
3268                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3269                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3270                result |=
3271                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3272                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3273                            "VkDescriptorSet (0x%" PRIxLEAST64
3274                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3275                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3276                            errorString.c_str());
3277            } else { // Valid set is bound and layout compatible, validate that it's updated
3278                // Pull the set node
3279                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
3280                // Gather active bindings
3281                std::unordered_set<uint32_t> bindings;
3282                for (auto binding : setBindingPair.second) {
3283                    bindings.insert(binding.first);
3284                }
3285                // Bind this set and its active descriptor resources to the command buffer
3286                pSet->BindCommandBuffer(cb_node, bindings);
3287                // Save vector of all active sets to verify dynamicOffsets below
3288                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
3289                // Make sure set has been updated if it has no immutable samplers
3290                //  If it has immutable samplers, we'll flag error later as needed depending on binding
3291                if (!pSet->IsUpdated()) {
3292                    for (auto binding : bindings) {
3293                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
3294                            result |= log_msg(
3295                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3296                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3297                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
3298                                "this will result in undefined behavior.",
3299                                (uint64_t)pSet->GetSet());
3300                        }
3301                    }
3302                }
3303            }
3304        }
3305        // For given active slots, verify any dynamic descriptors and record updated images & buffers
3306        result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
3307    }
3308
3309    // Check general pipeline state that needs to be validated at drawtime
3310    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3311        result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3312
3313    return result;
3314}
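
// Illustrative sketch (documentation only; handles are hypothetical): the bind order this routine
// validates -- a pipeline bound, every set its layout uses bound and compatible, and one dynamic
// offset supplied per dynamic descriptor -- before the draw is recorded.
#if 0
static void example_draw_time_bindings(VkCommandBuffer cb, VkPipeline pipeline, VkPipelineLayout layout,
                                       VkDescriptorSet set0) {
    vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
    const uint32_t dynamic_offset = 0;  // one entry per dynamic descriptor in set0
    vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, 1, &set0, 1, &dynamic_offset);
    vkCmdDraw(cb, 3, 1, 0, 0);
}
#endif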
3315
3316// Validate HW line width capabilities prior to setting requested line width.
3317static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3318    bool skip_call = false;
3319
3320    // First check to see if the physical device supports wide lines.
3321    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
3322        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3323                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
3324                                            "not supported/enabled so lineWidth must be 1.0f!",
3325                             lineWidth);
3326    } else {
3327        // Otherwise, make sure the width falls in the valid range.
3328        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3329            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3330            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3331                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
3332                                                          "to between [%f, %f]!",
3333                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3334                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3335        }
3336    }
3337
3338    return skip_call;
3339}
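
// Illustrative sketch (documentation only): an application can avoid both errors above by clamping
// a requested width to the device's reported range, and to 1.0f when wideLines is not enabled.
#if 0
static float example_clamp_line_width(const VkPhysicalDeviceFeatures &features,
                                      const VkPhysicalDeviceLimits &limits, float requested) {
    if (!features.wideLines) return 1.0f;  // without wideLines, only 1.0f is legal
    return std::max(limits.lineWidthRange[0], std::min(limits.lineWidthRange[1], requested));
}
#endif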
3340
3341// Verify that create state for a pipeline is valid
3342static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_STATE *> pPipelines,
3343                                      int pipelineIndex) {
3344    bool skip_call = false;
3345
3346    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
3347
3348    // If create derivative bit is set, check that we've specified a base
3349    // pipeline correctly, and that the base pipeline was created to allow
3350    // derivatives.
3351    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3352        PIPELINE_STATE *pBasePipeline = nullptr;
3353        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3354              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3355            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3356                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3357                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3358        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3359            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3360                skip_call |=
3361                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3362                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3363                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3364            } else {
3365                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3366            }
3367        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3368            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3369        }
3370
3371        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3372            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3373                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3374                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3375        }
3376    }
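    // Example (illustrative sketch): a two-element pCreateInfos array that passes the derivative
    // checks above. Exactly one of basePipelineHandle/basePipelineIndex is specified, the base
    // occurs earlier in the array, and the base allows derivatives:
    //
    //     VkGraphicsPipelineCreateInfo ci[2] = {};
    //     ci[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
    //     ci[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
    //     ci[1].basePipelineIndex = 0;                // base earlier in the array
    //     ci[1].basePipelineHandle = VK_NULL_HANDLE;  // index is used, so handle stays null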
3377
3378    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3379        if (!my_data->enabled_features.independentBlend) {
3380            if (pPipeline->attachments.size() > 1) {
3381                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3382                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3383                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3384                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3385                    // only attachment state, so memcmp is best suited for the comparison
3386                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3387                               sizeof(pAttachments[0]))) {
3388                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3389                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3390                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
3391                                             "enabled, all elements of pAttachments must be identical");
3392                        break;
3393                    }
3394                }
3395            }
3396        }
3397        if (!my_data->enabled_features.logicOp &&
3398            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3399            skip_call |=
3400                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3401                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3402                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3403        }
3404    }
3405
3406    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3407    // produces nonsense errors that confuse users. Other layers should already
3408    // emit errors for renderpass being invalid.
3409    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3410    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3411        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3412                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3413                                                                            "is out of range for this renderpass (0..%u)",
3414                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1);
3415    }
3416
3417    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3418                                                    my_data->shaderModuleMap)) {
3419        skip_call = true;
3420    }
3421    // Each shader's stage must be unique
3422    if (pPipeline->duplicate_shaders) {
3423        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3424            if (pPipeline->duplicate_shaders & stage) {
3425                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3426                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3427                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3428                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3429            }
3430        }
3431    }
3432    // VS is required
3433    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3434        skip_call |=
3435            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3436                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3437    }
3438    // Either both or neither TC/TE shaders should be defined
3439    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3440        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3441        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3442                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3443                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3444    }
3445    // Compute shaders should be specified independent of Gfx shaders
3446    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3447        (pPipeline->active_shaders &
3448         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3449          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3450        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3451                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3452                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3453    }
3454    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3455    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3456    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3457        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3458         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3459        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3460                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3461                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3462                                                                            "topology for tessellation pipelines");
3463    }
3464    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3465        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3466        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3467            skip_call |=
3468                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3469                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3470                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3471                                                                       "topology is only valid for tessellation pipelines");
3472        }
3473        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3474            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3475                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3476                                 "Invalid Pipeline CreateInfo State: "
3477                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3478                                 "topology used. pTessellationState must not be NULL in this case.");
3479        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3480                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3481            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3482                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3483                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3484                                                                                "topology used with patchControlPoints value %u."
3485                                                                                " patchControlPoints should be >0 and <=32.",
3486                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3487        }
3488    }
3489    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3490    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3491        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3492            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3493                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3494        }
3495    }
3496    // Viewport state must be included if rasterization is enabled.
3497    // If the viewport state is included, the viewport and scissor counts should always match.
3498    // NOTE : Even if viewport/scissor are flagged as dynamic, the counts still need to be set correctly for the shader compiler
3499    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3500        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3501        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3502            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3503                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3504                                                                            "and scissors are dynamic PSO must include "
3505                                                                            "viewportCount and scissorCount in pViewportState.");
3506        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3507                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3508            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3509                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3510                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3511                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3512                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3513        } else {
3514            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3515            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3516            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3517            if (!dynViewport) {
3518                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3519                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3520                    skip_call |=
3521                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3522                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3523                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3524                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
3525                                "vkCmdSetViewport().",
3526                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3527                }
3528            }
3529            if (!dynScissor) {
3530                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3531                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3532                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3533                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3534                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3535                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
3536                                         "vkCmdSetScissor().",
3537                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3538                }
3539            }
3540        }
3541
3542        // If rasterization is not disabled, and subpass uses a depth/stencil
3543        // attachment, pDepthStencilState must be a pointer to a valid structure
3544        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
3545        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
3546            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3547            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
3548                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
3549                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3550                                     "Invalid Pipeline CreateInfo State: "
3551                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
3552                                     "depth/stencil attachment");
3553            }
3554        }
3555    }
3556    return skip_call;
3557}
3558
3559// Free the Pipeline nodes
3560static void deletePipelines(layer_data *my_data) {
3561    if (my_data->pipelineMap.empty())
3562        return;
3563    for (auto &pipe_map_pair : my_data->pipelineMap) {
3564        delete pipe_map_pair.second;
3565    }
3566    my_data->pipelineMap.clear();
3567}
3568
3569// Block of code at start here specifically for managing/tracking DSs
3570
3571// Return Pool node ptr for specified pool or else NULL
3572DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3573    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3574    if (pool_it == dev_data->descriptorPoolMap.end()) {
3575        return NULL;
3576    }
3577    return pool_it->second;
3578}
3579
3580// Return false if update struct is of valid type, otherwise flag error and return code from callback
3581static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3582    switch (pUpdateStruct->sType) {
3583    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3584    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3585        return false;
3586    default:
3587        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3588                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3589                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3590                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3591    }
3592}
3593
3594// Set count for given update struct in the last parameter
3595static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3596    switch (pUpdateStruct->sType) {
3597    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3598        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3599    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3600        // TODO : Need to understand this case better and make sure code is correct
3601        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3602    default:
3603        return 0;
3604    }
3605}
3606
3607// For given layout and update, return the first overall index of the layout that is updated
3608static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3609                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3610    return binding_start_index + arrayIndex;
3611}
3612// For given layout and update, return the last overall index of the layout that is updated
3613static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3614                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3615    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3616    return binding_start_index + arrayIndex + count - 1;
3617}
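// Worked example for the two helpers above: with binding_start_index = 4, arrayIndex = 1, and a
// VkWriteDescriptorSet whose descriptorCount is 3, the update covers overall layout indices
// [4 + 1, 4 + 1 + 3 - 1] = [5, 7].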
3618// Verify that the descriptor type in the update struct matches what's expected by the layout
3619static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3620                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3621    // First get actual type of update
3622    bool skip_call = false;
3623    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3624    switch (pUpdateStruct->sType) {
3625    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3626        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3627        break;
3628    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3629        // Copy updates need no type check here
3630        return false;
3632    default:
3633        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3634                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3635                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3636                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3637    }
3638    if (!skip_call) {
3639        if (layout_type != actualType) {
3640            skip_call |= log_msg(
3641                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3642                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3643                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3644                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3645        }
3646    }
3647    return skip_call;
3648}
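// Example (illustrative): a binding declared as VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER in the set
// layout but written with a VkWriteDescriptorSet whose descriptorType is
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER lands in the mismatch branch above and reports
// DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH.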
3649// TODO: Consolidate the overlapping FindLayout/SetLayout helper functions below
3650bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3651    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3652    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3653        return false;
3654    }
3655    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3656    imgpair.subresource.aspectMask = aspectMask;
3657    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3658    if (imgsubIt == pCB->imageLayoutMap.end()) {
3659        return false;
3660    }
3661    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3662        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3663                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3664                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3665                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3666    }
3667    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3668        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3669                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3670                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3671                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3672    }
3673    node = imgsubIt->second;
3674    return true;
3675}
3676
3677bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3678    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3679        return false;
3680    }
3681    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3682    imgpair.subresource.aspectMask = aspectMask;
3683    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3684    if (imgsubIt == my_data->imageLayoutMap.end()) {
3685        return false;
3686    }
3687    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3688        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3689                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3690                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3691                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3692    }
3693    layout = imgsubIt->second.layout;
3694    return true;
3695}
3696
3697// find layout(s) on the cmd buf level
3698bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3699    ImageSubresourcePair imgpair = {image, true, range};
3700    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3701    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3702    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3703    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3704    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3705    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3706        imgpair = {image, false, VkImageSubresource()};
3707        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3708        if (imgsubIt == pCB->imageLayoutMap.end())
3709            return false;
3710        node = imgsubIt->second;
3711    }
3712    return true;
3713}
3714
3715// find layout(s) on the global level
3716bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3717    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3718    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3719    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3720    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3721    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3722    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3723        imgpair = {imgpair.image, false, VkImageSubresource()};
3724        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3725        if (imgsubIt == my_data->imageLayoutMap.end())
3726            return false;
3727        layout = imgsubIt->second.layout;
3728    }
3729    return true;
3730}
3731
3732bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3733    ImageSubresourcePair imgpair = {image, true, range};
3734    return FindLayout(my_data, imgpair, layout);
3735}
3736
3737bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3738    auto sub_data = my_data->imageSubresourceMap.find(image);
3739    if (sub_data == my_data->imageSubresourceMap.end())
3740        return false;
3741    auto img_node = getImageNode(my_data, image);
3742    if (!img_node)
3743        return false;
3744    bool ignoreGlobal = false;
3745    // TODO: Make this robust for >1 aspect mask. For now it will just ignore
3746    // potential errors in this case.
3747    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3748        ignoreGlobal = true;
3749    }
3750    for (auto imgsubpair : sub_data->second) {
3751        if (ignoreGlobal && !imgsubpair.hasSubresource)
3752            continue;
3753        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3754        if (img_data != my_data->imageLayoutMap.end()) {
3755            layouts.push_back(img_data->second.layout);
3756        }
3757    }
3758    return true;
3759}
3760
3761// Set the layout on the global level
3762void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3763    VkImage &image = imgpair.image;
3764    // TODO (mlentine): Maybe set format if new? Not used atm.
3765    my_data->imageLayoutMap[imgpair].layout = layout;
3766    // TODO (mlentine): Maybe make vector a set?
3767    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3768    if (subresource == my_data->imageSubresourceMap[image].end()) {
3769        my_data->imageSubresourceMap[image].push_back(imgpair);
3770    }
3771}
3772
3773// Set the layout on the cmdbuf level
3774void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3775    pCB->imageLayoutMap[imgpair] = node;
3776    // TODO (mlentine): Maybe make vector a set?
3777    auto subresource =
3778        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3779    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3780        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3781    }
3782}
3783
3784void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3785    // TODO (mlentine): Maybe make vector a set?
3786    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3787        pCB->imageSubresourceMap[imgpair.image].end()) {
3788        pCB->imageLayoutMap[imgpair].layout = layout;
3789    } else {
3790        // TODO (mlentine): Could be expensive and might need to be removed.
3791        assert(imgpair.hasSubresource);
3792        IMAGE_CMD_BUF_LAYOUT_NODE node;
3793        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3794            node.initialLayout = layout;
3795        }
3796        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3797    }
3798}
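// Worked example for the cmd-buffer-level SetLayout above: the first call for a subresource not
// yet tracked in this command buffer records {initialLayout = layout, layout = layout} (assuming
// no whole-image entry supplies an earlier layout); a later call for the same subresource takes
// the find-in-map path and only overwrites .layout, so the recorded initialLayout is preserved
// for submit-time validation.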
3799
3800template <class OBJECT, class LAYOUT>
3801void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3802    if (imgpair.subresource.aspectMask & aspectMask) {
3803        imgpair.subresource.aspectMask = aspectMask;
3804        SetLayout(pObject, imgpair, layout);
3805    }
3806}
3807
3808template <class OBJECT, class LAYOUT>
3809void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3810    ImageSubresourcePair imgpair = {image, true, range};
3811    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3812    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3813    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3814    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3815}
3816
3817template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3818    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3819    SetLayout(pObject, image, imgpair, layout);
3820}
3821
3822void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3823    auto view_state = getImageViewState(dev_data, imageView);
3824    assert(view_state);
3825    auto image = view_state->create_info.image;
3826    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
3827    // TODO: Do not iterate over every possibility - consolidate where possible
3828    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3829        uint32_t level = subRange.baseMipLevel + j;
3830        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3831            uint32_t layer = subRange.baseArrayLayer + k;
3832            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3833            // TODO: If ImageView was created with depth or stencil, transition both layouts as
3834            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
3835            // is OK for descriptor set layout validation
3836            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3837                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
3838                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
3839                }
3840            }
3841            SetLayout(pCB, image, sub, layout);
3842        }
3843    }
3844}
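// Worked example: a view created with baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0,
// layerCount = 3 makes the loops above set the layout for the six subresources (mip, layer) in
// {1,2} x {0,1,2}; for a combined depth/stencil format such as VK_FORMAT_D24_UNORM_S8_UINT both
// aspect bits are transitioned together.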
3845
3846// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3847// func_str is the name of the calling function
3848// Return false if no errors occur
3849// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3850static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
3851    if (dev_data->instance_state->disabled.idle_descriptor_set)
3852        return false;
3853    bool skip_call = false;
3854    auto set_node = dev_data->setMap.find(set);
3855    if (set_node == dev_data->setMap.end()) {
3856        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3857                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3858                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3859                             (uint64_t)(set));
3860    } else {
3861        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
3862        if (set_node->second->in_use.load()) {
3863            skip_call |=
3864                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3865                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
3866                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
3867                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
3868        }
3869    }
3870    return skip_call;
3871}
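// Example (illustrative sketch): to avoid the in-use error above, an application waits for the
// work referencing the set before freeing it. 'submit_fence' is an assumption standing in for
// the fence passed to vkQueueSubmit() for the last submission that used the set, and the pool is
// assumed to have been created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT:
//
//     vkWaitForFences(device, 1, &submit_fence, VK_TRUE, UINT64_MAX);
//     vkFreeDescriptorSets(device, descriptor_pool, 1, &descriptor_set);  // set is now idle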
3872
3873// Remove set from setMap and delete the set
3874static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3875    dev_data->setMap.erase(descriptor_set->GetSet());
3876    delete descriptor_set;
3877}
3878// Free all DS Pools including their Sets & related sub-structs
3879// NOTE : Calls to this function should be wrapped in mutex
3880static void deletePools(layer_data *my_data) {
3881    if (my_data->descriptorPoolMap.size() <= 0)
3882        return;
3883    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3884        // Remove this pools' sets from setMap and delete them
3885        for (auto ds : (*ii).second->sets) {
3886            freeDescriptorSet(my_data, ds);
3887        }
3888        (*ii).second->sets.clear();
3889    }
3890    my_data->descriptorPoolMap.clear();
3891}
3892
3893static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3894                                VkDescriptorPoolResetFlags flags) {
3895    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3896    // TODO: validate flags
3897    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3898    for (auto ds : pPool->sets) {
3899        freeDescriptorSet(my_data, ds);
3900    }
3901    pPool->sets.clear();
3902    // Reset available count for each type and available sets for this pool
3903    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3904        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3905    }
3906    pPool->availableSets = pPool->maxSets;
3907}
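// Example (illustrative): after vkResetDescriptorPool() the bookkeeping above restores the pool
// to its creation-time capacity, so a pool created with maxSets = 8 can allocate 8 sets again
// even if all 8 had been allocated (and implicitly freed) before the reset.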
3908
3909// For given CB object, fetch associated CB Node from map
3910static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3911    auto it = my_data->commandBufferMap.find(cb);
3912    if (it == my_data->commandBufferMap.end()) {
3913        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3914                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3915                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3916        return NULL;
3917    }
3918    return it->second;
3919}
3920// Free all CB Nodes
3921// NOTE : Calls to this function should be wrapped in mutex
3922static void deleteCommandBuffers(layer_data *my_data) {
3923    if (my_data->commandBufferMap.empty()) {
3924        return;
3925    }
3926    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3927        delete (*ii).second;
3928    }
3929    my_data->commandBufferMap.clear();
3930}
3931
3932static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3933    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3934                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3935                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3936}
3937
3938bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3939    if (!pCB->activeRenderPass)
3940        return false;
3941    bool skip_call = false;
3942    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3943        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3944        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3945                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3946                             "Commands cannot be called in a subpass using secondary command buffers.");
3947    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3948        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3949                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3950                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3951    }
3952    return skip_call;
3953}
3954
3955static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3956    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3957        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3958                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3959                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3960    return false;
3961}
3962
3963static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3964    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3965        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3966                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3967                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3968    return false;
3969}
3970
3971static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3972    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3973        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3974                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3975                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3976    return false;
3977}
3978
3979// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3980//  in the recording state or if there's an issue with the Cmd ordering
3981static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3982    bool skip_call = false;
3983    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3984    if (pPool) {
3985        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3986        switch (cmd) {
3987        case CMD_BINDPIPELINE:
3988        case CMD_BINDPIPELINEDELTA:
3989        case CMD_BINDDESCRIPTORSETS:
3990        case CMD_FILLBUFFER:
3991        case CMD_CLEARCOLORIMAGE:
3992        case CMD_SETEVENT:
3993        case CMD_RESETEVENT:
3994        case CMD_WAITEVENTS:
3995        case CMD_BEGINQUERY:
3996        case CMD_ENDQUERY:
3997        case CMD_RESETQUERYPOOL:
3998        case CMD_COPYQUERYPOOLRESULTS:
3999        case CMD_WRITETIMESTAMP:
4000            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4001            break;
4002        case CMD_SETVIEWPORTSTATE:
4003        case CMD_SETSCISSORSTATE:
4004        case CMD_SETLINEWIDTHSTATE:
4005        case CMD_SETDEPTHBIASSTATE:
4006        case CMD_SETBLENDSTATE:
4007        case CMD_SETDEPTHBOUNDSSTATE:
4008        case CMD_SETSTENCILREADMASKSTATE:
4009        case CMD_SETSTENCILWRITEMASKSTATE:
4010        case CMD_SETSTENCILREFERENCESTATE:
4011        case CMD_BINDINDEXBUFFER:
4012        case CMD_BINDVERTEXBUFFER:
4013        case CMD_DRAW:
4014        case CMD_DRAWINDEXED:
4015        case CMD_DRAWINDIRECT:
4016        case CMD_DRAWINDEXEDINDIRECT:
4017        case CMD_BLITIMAGE:
4018        case CMD_CLEARATTACHMENTS:
4019        case CMD_CLEARDEPTHSTENCILIMAGE:
4020        case CMD_RESOLVEIMAGE:
4021        case CMD_BEGINRENDERPASS:
4022        case CMD_NEXTSUBPASS:
4023        case CMD_ENDRENDERPASS:
4024            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4025            break;
4026        case CMD_DISPATCH:
4027        case CMD_DISPATCHINDIRECT:
4028            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4029            break;
4030        case CMD_COPYBUFFER:
4031        case CMD_COPYIMAGE:
4032        case CMD_COPYBUFFERTOIMAGE:
4033        case CMD_COPYIMAGETOBUFFER:
4034        case CMD_CLONEIMAGEDATA:
4035        case CMD_UPDATEBUFFER:
4036        case CMD_PIPELINEBARRIER:
4037        case CMD_EXECUTECOMMANDS:
4038        case CMD_END:
4039            break;
4040        default:
4041            break;
4042        }
4043    }
4044    if (pCB->state != CB_RECORDING) {
4045        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4046    } else {
4047        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4048        CMD_NODE cmdNode = {};
4049        // Init cmd node and append to end of cmd vector
4050        cmdNode.cmdNumber = ++pCB->numCmds;
4051        cmdNode.type = cmd;
4052        pCB->cmds.push_back(cmdNode);
4053    }
4054    return skip_call;
4055}
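// Example (illustrative sketch): the per-command checks in addCmd() are driven by the queue
// family of the command pool. An application recording vkCmdDispatch() therefore needs a pool on
// a compute-capable family; 'qfp'/'qfp_count' are assumptions standing in for the results of
// vkGetPhysicalDeviceQueueFamilyProperties():
//
//     uint32_t compute_family = UINT32_MAX;
//     for (uint32_t i = 0; i < qfp_count; ++i) {
//         if (qfp[i].queueFlags & VK_QUEUE_COMPUTE_BIT) { compute_family = i; break; }
//     }
//     VkCommandPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//     pool_ci.queueFamilyIndex = compute_family;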
4056// For given object struct return a ptr of BASE_NODE type for its wrapping struct
4057BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
4058    BASE_NODE *base_ptr = nullptr;
4059    switch (object_struct.type) {
4060    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4061        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
4062        break;
4063    }
4064    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4065        base_ptr = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
4066        break;
4067    }
4068    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4069        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
4070        break;
4071    }
4072    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4073        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
4074        break;
4075    }
4076    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4077        base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
4078        break;
4079    }
4080    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4081        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
4082        break;
4083    }
4084    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4085        base_ptr = getImageNode(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
4086        break;
4087    }
4088    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4089        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
4090        break;
4091    }
4092    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4093        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
4094        break;
4095    }
4096    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4097        base_ptr = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
4098        break;
4099    }
4100    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4101        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
4102        break;
4103    }
4104    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4105        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
4106        break;
4107    }
4108    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4109        base_ptr = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
4110        break;
4111    }
4112    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4113        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
4114        break;
4115    }
4116    default:
4117        // TODO : Any other objects to be handled here?
4118        assert(0);
4119        break;
4120    }
4121    return base_ptr;
4122}
4123
4124// Tie the VK_OBJECT to the cmd buffer which includes:
4125//  Add object_binding to cmd buffer
4126//  Add cb_binding to object
4127static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
4128    cb_bindings->insert(cb_node);
4129    cb_node->object_bindings.insert(obj);
4130}
4131// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
4132static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
4133    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
4134    if (base_obj)
4135        base_obj->cb_bindings.erase(cb_node);
4136}
4137// Reset the command buffer state
4138//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4139static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
4140    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
4141    if (pCB) {
4142        pCB->in_use.store(0);
4143        pCB->cmds.clear();
4144        // Reset CB state (note that createInfo is not cleared)
4145        pCB->commandBuffer = cb;
4146        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4147        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4148        pCB->numCmds = 0;
4149        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4150        pCB->state = CB_NEW;
4151        pCB->submitCount = 0;
4152        pCB->status = 0;
4153        pCB->viewportMask = 0;
4154        pCB->scissorMask = 0;
4155
4156        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4157            pCB->lastBound[i].reset();
4158        }
4159
4160        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4161        pCB->activeRenderPass = nullptr;
4162        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4163        pCB->activeSubpass = 0;
4164        pCB->broken_bindings.clear();
4165        pCB->waitedEvents.clear();
4166        pCB->events.clear();
4167        pCB->writeEventsBeforeWait.clear();
4168        pCB->waitedEventsBeforeQueryReset.clear();
4169        pCB->queryToStateMap.clear();
4170        pCB->activeQueries.clear();
4171        pCB->startedQueries.clear();
4172        pCB->imageSubresourceMap.clear();
4173        pCB->imageLayoutMap.clear();
4174        pCB->eventToStageMap.clear();
4175        pCB->drawData.clear();
4176        pCB->currentDrawData.buffers.clear();
4177        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4178        // Make sure any secondaryCommandBuffers are removed from globalInFlight
4179        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
4180            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
4181        }
4182        pCB->secondaryCommandBuffers.clear();
4183        pCB->updateImages.clear();
4184        pCB->updateBuffers.clear();
4185        clear_cmd_buf_and_mem_references(dev_data, pCB);
4186        pCB->eventUpdates.clear();
4187        pCB->queryUpdates.clear();
4188
4189        // Remove object bindings
4190        for (auto obj : pCB->object_bindings) {
4191            removeCommandBufferBinding(dev_data, &obj, pCB);
4192        }
4193        pCB->object_bindings.clear();
4194        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
4195        for (auto framebuffer : pCB->framebuffers) {
4196            auto fb_state = getFramebufferState(dev_data, framebuffer);
4197            if (fb_state)
4198                fb_state->cb_bindings.erase(pCB);
4199        }
4200        pCB->framebuffers.clear();
4201        pCB->activeFramebuffer = VK_NULL_HANDLE;
4202    }
4203}
4204
4205// Set PSO-related status bits for CB, including dynamic state set via PSO
4206static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
4207    // Account for any dynamic state not set via this PSO
4208    if (!pPipe->graphicsPipelineCI.pDynamicState ||
4209        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
4210        pCB->status |= CBSTATUS_ALL;
4211    } else {
4212        // First consider all state on
4213        // Then unset any state that's noted as dynamic in PSO
4214        // Finally OR that into CB statemask
4215        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4216        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
4217            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
4218            case VK_DYNAMIC_STATE_LINE_WIDTH:
4219                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4220                break;
4221            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4222                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4223                break;
4224            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4225                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4226                break;
4227            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4228                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4229                break;
4230            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4231                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4232                break;
4233            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4234                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4235                break;
4236            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4237                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4238                break;
4239            default:
4240                // TODO : Flag error here
4241                break;
4242            }
4243        }
4244        pCB->status |= psoDynStateMask;
4245    }
4246}
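// Worked example: binding a pipeline whose pDynamicState lists only VK_DYNAMIC_STATE_LINE_WIDTH
// ORs (CBSTATUS_ALL & ~CBSTATUS_LINE_WIDTH_SET) into pCB->status, so every piece of state except
// line width is considered set by the PSO; CBSTATUS_LINE_WIDTH_SET is only added once the app
// records vkCmdSetLineWidth() on this command buffer.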
4247
4248// Print the last bound Gfx Pipeline
4249static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4250    bool skip_call = false;
4251    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4252    if (pCB) {
4253        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
4254        if (!pPipeTrav) {
4255            // nothing to print
4256        } else {
4257            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4258                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
4259                                 vk_print_vkgraphicspipelinecreateinfo(
4260                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
4261                                     .c_str());
4262        }
4263    }
4264    return skip_call;
4265}
4266
4267static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4268    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4269    if (pCB && !pCB->cmds.empty()) {
4270        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4271                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
4272        vector<CMD_NODE> cmds = pCB->cmds;
4273        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4274            // TODO : Need to pass cb as srcObj here
4275            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4276                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4277        }
4278    } else {
4279        // Nothing to print
4280    }
4281}
4282
4283static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4284    bool skip_call = false;
4285    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4286        return skip_call;
4287    }
4288    skip_call |= printPipeline(my_data, cb);
4289    return skip_call;
4290}
4291
4292// Flags validation error if the associated call is made inside a render pass. The apiName
4293// routine should ONLY be called outside a render pass.
4294static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4295    bool inside = false;
4296    if (pCB->activeRenderPass) {
4297        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4298                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4299                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
4300                         (uint64_t)pCB->activeRenderPass->renderPass);
4301    }
4302    return inside;
4303}
4304
4305// Flags validation error if the associated call is made outside a render pass. The apiName
4306// routine should ONLY be called inside a render pass.
4307static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4308    bool outside = false;
4309    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4310        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4311         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4312        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4313                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4314                          "%s: This call must be issued inside an active render pass.", apiName);
4315    }
4316    return outside;
4317}
4318
4319static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4320
4321    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4322
4323}
4324
4325static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
4326    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4327        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
4328            instance_data->surfaceExtensionEnabled = true;
4329        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
4330            instance_data->displayExtensionEnabled = true;
4331#ifdef VK_USE_PLATFORM_ANDROID_KHR
4332        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
4333            instance_data->androidSurfaceExtensionEnabled = true;
4334#endif
4335#ifdef VK_USE_PLATFORM_MIR_KHR
4336        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
4337            instance_data->mirSurfaceExtensionEnabled = true;
4338#endif
4339#ifdef VK_USE_PLATFORM_WAYLAND_KHR
4340        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
4341            instance_data->waylandSurfaceExtensionEnabled = true;
4342#endif
4343#ifdef VK_USE_PLATFORM_WIN32_KHR
4344        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
4345            instance_data->win32SurfaceExtensionEnabled = true;
4346#endif
4347#ifdef VK_USE_PLATFORM_XCB_KHR
4348        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
4349            instance_data->xcbSurfaceExtensionEnabled = true;
4350#endif
4351#ifdef VK_USE_PLATFORM_XLIB_KHR
4352        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
4353            instance_data->xlibSurfaceExtensionEnabled = true;
4354#endif
4355    }
4356}
4357
4358VKAPI_ATTR VkResult VKAPI_CALL
4359CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4360    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4361
4362    assert(chain_info->u.pLayerInfo);
4363    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4364    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4365    if (fpCreateInstance == NULL)
4366        return VK_ERROR_INITIALIZATION_FAILED;
4367
4368    // Advance the link info for the next element on the chain
4369    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4370
4371    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4372    if (result != VK_SUCCESS)
4373        return result;
4374
4375    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
4376    instance_data->instance = *pInstance;
4377    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
4378
4379    instance_data->report_data = debug_report_create_instance(
4380        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4381    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
4382    init_core_validation(instance_data, pAllocator);
4383
4384    instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
4385    ValidateLayerOrdering(*pCreateInfo);
4386
4387    return result;
4388}
4389
4390/* hook DestroyInstance to clean up this layer's per-instance state */
4391VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4392    // TODOSC : Shouldn't need any customization here
4393    dispatch_key key = get_dispatch_key(instance);
4394    // TBD: Need any locking this early, in case this function is called at the
4395    // same time by more than one thread?
4396    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
4397    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
4398
4399    std::lock_guard<std::mutex> lock(global_lock);
4400    // Clean up logging callback, if any
4401    while (instance_data->logging_callback.size() > 0) {
4402        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
4403        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
4404        instance_data->logging_callback.pop_back();
4405    }
4406
4407    layer_debug_report_destroy_instance(instance_data->report_data);
4408    instance_layer_data_map.erase(key);
4409}
4410
4411static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4412    uint32_t i;
4413    // TBD: Need any locking, in case this function is called at the same time
4414    // by more than one thread?
4415    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4416    dev_data->device_extensions.wsi_enabled = false;
4417    dev_data->device_extensions.wsi_display_swapchain_enabled = false;
4418
4419    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4420        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4421            dev_data->device_extensions.wsi_enabled = true;
4422        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
4423            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
4424    }
4425}
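// Illustrative sketch (hypothetical application-side code): wsi_enabled is set
// above when the app enables the swapchain extension at device creation.
//
//     const char *dev_exts[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     VkDeviceCreateInfo dci = {};
//     dci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     dci.enabledExtensionCount = 1;
//     dci.ppEnabledExtensionNames = dev_exts;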
4426
4427// Verify that the queue families requested in vkCreateDevice() were queried beforehand and are within valid bounds
4428bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu, const VkDeviceCreateInfo *create_info) {
4429    bool skip_call = false;
4430    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
4431    // First check whether the app has actually requested queueFamilyProperties
4432    if (!physical_device_state) {
4433        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4434                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
4435                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
4436    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
4437        // TODO: This is not called out as an invalid use in the spec, so make a more informative recommendation.
4438        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4439                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4440                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4441    } else {
4442        // Check that the requested queue properties are valid
4443        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4444            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4445            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
4446                skip_call |= log_msg(
4447                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4448                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4449                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4450            } else if (create_info->pQueueCreateInfos[i].queueCount >
4451                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
4452                skip_call |=
4453                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4454                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4455                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4456                            "requested queueCount is %u.",
4457                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
4458                            create_info->pQueueCreateInfos[i].queueCount);
4459            }
4460        }
4461    }
4462    return skip_call;
4463}
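// Illustrative sketch (hypothetical application-side code): the query sequence
// that keeps the validation above quiet. Query the family properties first,
// then request only indices and counts that actually exist.
//
//     uint32_t family_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, nullptr);
//     std::vector<VkQueueFamilyProperties> families(family_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, families.data());
//     // Any VkDeviceQueueCreateInfo must then use queueFamilyIndex < family_count
//     // and queueCount <= families[queueFamilyIndex].queueCount.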
4464
4465// Verify that features have been queried and that they are available
4466static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
4467    bool skip_call = false;
4468
4469    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
4470    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
4471    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4472    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues:
4473    //  we need to provide the struct member name with the issue. Doing that will likely require
4474    //  looping through each struct member, which should be done w/ codegen to keep it in sync.
4475    uint32_t errors = 0;
4476    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4477    for (uint32_t i = 0; i < total_bools; i++) {
4478        if (requested[i] > actual[i]) {
4479            // TODO: Add index to struct member name helper to be able to include a feature name
4480            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4481                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4482                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4483                "which is not available on this device.",
4484                i);
4485            errors++;
4486        }
4487    }
4488    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
4489        // If user didn't request features, notify them that they should
4490        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4491        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4492                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4493                             "DL", "You requested features that are unavailable on this device. You should first query feature "
4494                                   "availability by calling vkGetPhysicalDeviceFeatures().");
4495    }
4496    return skip_call;
4497}
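// Illustrative sketch (hypothetical application-side code): query supported
// features first and request only what was reported, so the VkBool32 loop
// above finds no feature requested beyond what the device offers.
//
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.samplerAnisotropy = supported.samplerAnisotropy; // enable only if available
//     device_create_info.pEnabledFeatures = &enabled;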
4498
4499VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4500                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4501    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
4502    bool skip_call = false;
4503
4504    // Check that any requested features are available
4505    if (pCreateInfo->pEnabledFeatures) {
4506        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
4507    }
4508    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
4509
4510    if (skip_call) {
4511        return VK_ERROR_VALIDATION_FAILED_EXT;
4512    }
4513
4514    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4515
4516    assert(chain_info->u.pLayerInfo);
4517    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4518    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4519    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4520    if (fpCreateDevice == NULL) {
4521        return VK_ERROR_INITIALIZATION_FAILED;
4522    }
4523
4524    // Advance the link info for the next element on the chain
4525    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4526
4527    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4528    if (result != VK_SUCCESS) {
4529        return result;
4530    }
4531
4532    std::unique_lock<std::mutex> lock(global_lock);
4533    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4534
4535    // Copy instance state into this device's layer_data struct
4536    my_device_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE(*(my_instance_data->instance_state)));
4537    my_device_data->instance_data = my_instance_data;
4538    // Setup device dispatch table
4539    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4540    my_device_data->device = *pDevice;
4541
4542    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4543    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4544    // Get physical device limits for this device
4545    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4546    uint32_t count;
4547    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4548    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4549    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4550        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4551    // TODO: device limits should make sure these are compatible
4552    if (pCreateInfo->pEnabledFeatures) {
4553        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4554    } else {
4555        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4556    }
4557    // Store physical device mem limits into device layer_data struct
4558    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4559    lock.unlock();
4560
4561    ValidateLayerOrdering(*pCreateInfo);
4562
4563    return result;
4564}
4565
4566// Tear down all of the layer's per-device state before calling down the chain to destroy the device
4567VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4568    // TODOSC : Shouldn't need any customization here
4569    dispatch_key key = get_dispatch_key(device);
4570    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4571    // Free all the memory
4572    std::unique_lock<std::mutex> lock(global_lock);
4573    deletePipelines(dev_data);
4574    dev_data->renderPassMap.clear();
4575    deleteCommandBuffers(dev_data);
4576    // This will also delete all sets in the pool & remove them from setMap
4577    deletePools(dev_data);
4578    // All sets should be removed
4579    assert(dev_data->setMap.empty());
4580    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4581        delete del_layout.second;
4582    }
4583    dev_data->descriptorSetLayoutMap.clear();
4584    dev_data->imageViewMap.clear();
4585    dev_data->imageMap.clear();
4586    dev_data->imageSubresourceMap.clear();
4587    dev_data->imageLayoutMap.clear();
4588    dev_data->bufferViewMap.clear();
4589    dev_data->bufferMap.clear();
4590    // Queues persist until device is destroyed
4591    dev_data->queueMap.clear();
4592    lock.unlock();
4593#if MTMERGESOURCE
4594    bool skip_call = false;
4595    lock.lock();
4596    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4597            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4598    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4599            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4600    print_mem_list(dev_data);
4601    printCBList(dev_data);
4602    // Report any memory leaks
4603    DEVICE_MEM_INFO *pInfo = NULL;
4604    if (!dev_data->memObjMap.empty()) {
4605        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4606            pInfo = (*ii).second.get();
4607            if (pInfo->alloc_info.allocationSize != 0) {
4608                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4609                skip_call |=
4610                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4611                            (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
4612                            "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4613                            "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4614                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4615            }
4616        }
4617    }
4618    layer_debug_report_destroy_device(device);
4619    lock.unlock();
4620
4621#if DISPATCH_MAP_DEBUG
4622    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4623#endif
4624    if (!skip_call) {
4625        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4626    }
4627#else
4628    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4629#endif
4630    layer_data_map.erase(key);
4631}
4632
4633static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4634
4635// This validates that the initial layout specified in the command buffer
4636// for the IMAGE is the same as the global IMAGE layout at the time the
4637// command buffer is submitted.
4638static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4639    bool skip_call = false;
4640    for (auto cb_image_data : pCB->imageLayoutMap) {
4641        VkImageLayout imageLayout;
4642        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4643            skip_call |=
4644                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4645                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4646                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4647        } else {
4648            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4649                // TODO: Set memory invalid which is in mem_tracker currently
4650            } else if (imageLayout != cb_image_data.second.initialLayout) {
4651                if (cb_image_data.first.hasSubresource) {
4652                    skip_call |= log_msg(
4653                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4654                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4655                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4656                        "with layout %s when first use is %s.",
4657                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4658                        cb_image_data.first.subresource.arrayLayer,
4659                        cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4660                        string_VkImageLayout(cb_image_data.second.initialLayout));
4661                } else {
4662                    skip_call |= log_msg(
4663                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4664                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4665                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4666                        "first use is %s.",
4667                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4668                        string_VkImageLayout(cb_image_data.second.initialLayout));
4669                }
4670            }
4671            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4672        }
4673    }
4674    return skip_call;
4675}
4676
4677// Loop through bound objects and increment their in_use counts
4678//  For any unknown objects, flag an error
4679static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4680    bool skip = false;
4681    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4682    BASE_NODE *base_obj = nullptr;
4683    for (auto obj : cb_node->object_bindings) {
4684        switch (obj.type) {
4685        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4686            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4687            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4688            break;
4689        }
4690        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4691            base_obj = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4692            error_code = DRAWSTATE_INVALID_SAMPLER;
4693            break;
4694        }
4695        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4696            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4697            error_code = DRAWSTATE_INVALID_QUERY_POOL;
4698            break;
4699        }
4700        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4701            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4702            error_code = DRAWSTATE_INVALID_PIPELINE;
4703            break;
4704        }
4705        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4706            base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4707            error_code = DRAWSTATE_INVALID_BUFFER;
4708            break;
4709        }
4710        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4711            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4712            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4713            break;
4714        }
4715        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4716            base_obj = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4717            error_code = DRAWSTATE_INVALID_IMAGE;
4718            break;
4719        }
4720        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4721            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4722            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4723            break;
4724        }
4725        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4726            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4727            error_code = DRAWSTATE_INVALID_EVENT;
4728            break;
4729        }
4730        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4731            base_obj = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4732            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4733            break;
4734        }
4735        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4736            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4737            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4738            break;
4739        }
4740        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4741            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4742            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4743            break;
4744        }
4745        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4746            base_obj = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4747            error_code = DRAWSTATE_INVALID_RENDERPASS;
4748            break;
4749        }
4750        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4751            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4752            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4753            break;
4754        }
4755        default:
4756            // TODO : Merge handling of other objects types into this code
4757            break;
4758        }
4759        if (!base_obj) {
4760            skip |=
4761                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4762                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4763        } else {
4764            base_obj->in_use.fetch_add(1);
4765        }
4766    }
4767    return skip;
4768}
4769
4770// Track which resources are in-flight by atomically incrementing their "in_use" count
4771static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4772    bool skip_call = false;
4773
4774    cb_node->in_use.fetch_add(1);
4775    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4776
4777    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
4778    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4779    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4780    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4781    //  should then be flagged prior to calling this function
4782    for (auto drawDataElement : cb_node->drawData) {
4783        for (auto buffer : drawDataElement.buffers) {
4784            auto buffer_node = getBufferNode(dev_data, buffer);
4785            if (!buffer_node) {
4786                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4787                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4788                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4789            } else {
4790                buffer_node->in_use.fetch_add(1);
4791            }
4792        }
4793    }
4794    for (auto event : cb_node->writeEventsBeforeWait) {
4795        auto event_node = getEventNode(dev_data, event);
4796        if (event_node)
4797            event_node->write_in_use++;
4798    }
4799    return skip_call;
4800}
4801
4802// Note: This function assumes that the global lock is held by the calling
4803// thread.
4804// TODO: untangle this.
4805static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4806    bool skip_call = false;
4807    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4808    if (pCB) {
4809        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4810            for (auto event : queryEventsPair.second) {
4811                if (my_data->eventMap[event].needsSignaled) {
4812                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4813                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4814                                         "Cannot get query results on queryPool 0x%" PRIx64
4815                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4816                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4817                }
4818            }
4819        }
4820    }
4821    return skip_call;
4822}
4823
4824// TODO: nuke this completely.
4825// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4826static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4827    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4828    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4829    pCB->in_use.fetch_sub(1);
4830    if (!pCB->in_use.load()) {
4831        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4832    }
4833}
4834
4835// Decrement in-use count for objects bound to command buffer
4836static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4837    BASE_NODE *base_obj = nullptr;
4838    for (auto obj : cb_node->object_bindings) {
4839        base_obj = GetStateStructPtrFromObject(dev_data, obj);
4840        if (base_obj) {
4841            base_obj->in_use.fetch_sub(1);
4842        }
4843    }
4844}
4845
4846static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
4847{
4848    bool skip_call = false; // TODO: extract everything that might fail to precheck
4849    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4850
4851    // Roll this queue forward, one submission at a time.
4852    while (pQueue->seq < seq) {
4853        auto & submission = pQueue->submissions.front();
4854
4855        for (auto & wait : submission.waitSemaphores) {
4856            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4857            pSemaphore->in_use.fetch_sub(1);
4858            auto & lastSeq = otherQueueSeqs[wait.queue];
4859            lastSeq = std::max(lastSeq, wait.seq);
4860        }
4861
4862        for (auto & semaphore : submission.signalSemaphores) {
4863            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4864            pSemaphore->in_use.fetch_sub(1);
4865        }
4866
4867        for (auto cb : submission.cbs) {
4868            auto cb_node = getCBNode(dev_data, cb);
4869            // First perform decrement on general case bound objects
4870            DecrementBoundResources(dev_data, cb_node);
4871            for (auto drawDataElement : cb_node->drawData) {
4872                for (auto buffer : drawDataElement.buffers) {
4873                    auto buffer_node = getBufferNode(dev_data, buffer);
4874                    if (buffer_node) {
4875                        buffer_node->in_use.fetch_sub(1);
4876                    }
4877                }
4878            }
4879            for (auto event : cb_node->writeEventsBeforeWait) {
4880                auto eventNode = dev_data->eventMap.find(event);
4881                if (eventNode != dev_data->eventMap.end()) {
4882                    eventNode->second.write_in_use--;
4883                }
4884            }
4885            for (auto queryStatePair : cb_node->queryToStateMap) {
4886                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4887            }
4888            for (auto eventStagePair : cb_node->eventToStageMap) {
4889                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4890            }
4891
4892            skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
4893            removeInFlightCmdBuffer(dev_data, cb);
4894        }
4895
4896        auto pFence = getFenceNode(dev_data, submission.fence);
4897        if (pFence) {
4898            pFence->state = FENCE_RETIRED;
4899        }
4900
4901        pQueue->submissions.pop_front();
4902        pQueue->seq++;
4903    }
4904
4905    // Roll other queues forward to the highest seq we saw a wait for
4906    for (auto qs : otherQueueSeqs) {
4907        skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
4908    }
4909
4910    return skip_call;
4911}
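// Worked example of the sequence bookkeeping above (numbers assumed for
// illustration): if pQueue->seq is 7 and three submissions are pending, they
// occupy seqs 8, 9, and 10. RetireWorkOnQueue(dev_data, pQueue, 9) retires the
// first two, leaves the third in flight, and then recursively retires work on
// any other queue whose semaphores the retired submissions waited on.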
4912
4913
4914// Submit a fence to a queue, using it to delimit previous fences and any
4915// previously untracked work on that queue.
4916static void
4917SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
4918{
4919    pFence->state = FENCE_INFLIGHT;
4920    pFence->signaler.first = pQueue->queue;
4921    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4922}
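// Worked example (numbers assumed for illustration): with pQueue->seq == 7,
// two submissions already pending, and a vkQueueSubmit of three batches guarded
// by this fence, the fence's signaler seq becomes 7 + 2 + 3 == 12, the seq of
// the last batch of this submit call.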
4923
4924static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4925    bool skip_call = false;
4926    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4927        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4928        skip_call |=
4929            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4930                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4931                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4932                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4933    }
4934    return skip_call;
4935}
4936
4937static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4938    bool skip = false;
4939    if (dev_data->instance_state->disabled.command_buffer_state)
4940        return skip;
4941    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4942    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4943        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4944                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4945                        "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4946                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4947                        (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4948    }
4949    // Validate that cmd buffers have been updated
4950    if (CB_RECORDED != pCB->state) {
4951        if (CB_INVALID == pCB->state) {
4952            // Inform app of reason CB invalid
4953            for (auto obj : pCB->broken_bindings) {
4954                const char *type_str = object_type_to_string(obj.type);
4955                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4956                const char *cause_str =
4957                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4958
4959                skip |=
4960                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4961                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4962                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4963                            " was %s.",
4964                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4965            }
4966        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4967            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4968                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4969                            "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to %s!",
4970                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), call_source);
4971        }
4972    }
4973    return skip;
4974}
4975
4976// Validate that queueFamilyIndices of primary command buffers match this queue
4977// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4978static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4979    bool skip_call = false;
4980    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4981    auto queue_node = getQueueNode(dev_data, queue);
4982
4983    if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
4984        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4985            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4986            "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
4987            " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
4988            reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
4989            reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
4990    }
4991
4992    return skip_call;
4993}
4994
4995static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4996    // Track in-use for resources off of primary and any secondary CBs
4997    bool skip_call = false;
4998
4999    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5000    // on device
5001    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5002
5003    skip_call |= validateAndIncrementResources(dev_data, pCB);
5004
5005    if (!pCB->secondaryCommandBuffers.empty()) {
5006        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5007            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5008            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
5009            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
5010                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5011                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5012                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5013                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
5014                        " but that buffer has subsequently been bound to "
5015                        "primary cmd buffer 0x%" PRIxLEAST64
5016                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
5017                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5018                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5019            }
5020        }
5021    }
5022
5023    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
5024
5025    return skip_call;
5026}
5027
5028static bool
5029ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
5030{
5031    bool skip_call = false;
5032
5033    if (pFence) {
5034        if (pFence->state == FENCE_INFLIGHT) {
5035            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5036                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5037                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
5038        }
5039
5040        else if (pFence->state == FENCE_RETIRED) {
5041            skip_call |=
5042                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5043                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5044                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
5045                        reinterpret_cast<uint64_t &>(pFence->fence));
5046        }
5047    }
5048
5049    return skip_call;
5050}
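// Illustrative sketch (hypothetical application-side code): the fence lifecycle
// the checks above expect: create unsignaled, submit, wait, then reset before reuse.
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence); // required before the fence is submitted again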
5051
5052
5053VKAPI_ATTR VkResult VKAPI_CALL
5054QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5055    bool skip_call = false;
5056    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5057    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5058    std::unique_lock<std::mutex> lock(global_lock);
5059
5060    auto pQueue = getQueueNode(dev_data, queue);
5061    auto pFence = getFenceNode(dev_data, fence);
5062    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
5063
5064    if (skip_call) {
5065        return VK_ERROR_VALIDATION_FAILED_EXT;
5066    }
5067
5068    // TODO : Review these old print functions and clean up as appropriate
5069    print_mem_list(dev_data);
5070    printCBList(dev_data);
5071
5072    // Mark the fence in-use.
5073    if (pFence) {
5074        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
5075    }
5076
5077    // Now verify each individual submit
5078    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5079        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5080        vector<SEMAPHORE_WAIT> semaphore_waits;
5081        vector<VkSemaphore> semaphore_signals;
5082        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5083            VkSemaphore semaphore = submit->pWaitSemaphores[i];
5084            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5085            if (pSemaphore) {
5086                if (pSemaphore->signaled) {
5087                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
5088                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
5089                        pSemaphore->in_use.fetch_add(1);
5090                    }
5091                    pSemaphore->signaler.first = VK_NULL_HANDLE;
5092                    pSemaphore->signaled = false;
5093                } else {
5094                    skip_call |=
5095                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5096                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5097                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
5098                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5099                }
5100            }
5101        }
5102        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5103            VkSemaphore semaphore = submit->pSignalSemaphores[i];
5104            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5105            if (pSemaphore) {
5106                if (pSemaphore->signaled) {
5107                    skip_call |=
5108                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5109                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5110                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
5111                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
5112                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5113                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
5114                } else {
5115                    pSemaphore->signaler.first = queue;
5116                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
5117                    pSemaphore->signaled = true;
5118                    pSemaphore->in_use.fetch_add(1);
5119                    semaphore_signals.push_back(semaphore);
5120                }
5121            }
5122        }
5123
5124        std::vector<VkCommandBuffer> cbs;
5125
5126        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5127            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5128            skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
5129            if (pCBNode) {
5130                cbs.push_back(submit->pCommandBuffers[i]);
5131                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
5132                    cbs.push_back(secondaryCmdBuffer);
5133                }
5134
5135                pCBNode->submitCount++; // increment submit count
5136                skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5137                skip_call |= validateQueueFamilyIndices(dev_data, pCBNode, queue);
5138                // Potential early exit here as bad object state may crash in delayed function calls
5139                if (skip_call)
5140                    return result;
5141                // Call submit-time functions to validate/update state
5142                for (auto &function : pCBNode->validate_functions) {
5143                    skip_call |= function();
5144                }
5145                for (auto &function : pCBNode->eventUpdates) {
5146                    skip_call |= function(queue);
5147                }
5148                for (auto &function : pCBNode->queryUpdates) {
5149                    skip_call |= function(queue);
5150                }
5151            }
5152        }
5153
5154        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
5155                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
5156    }
5157
5158    if (pFence && !submitCount) {
5159        // If no submissions, but just dropping a fence on the end of the queue,
5160        // record an empty submission with just the fence, so we can determine
5161        // its completion.
5162        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
5163                                         std::vector<SEMAPHORE_WAIT>(),
5164                                         std::vector<VkSemaphore>(),
5165                                         fence);
5166    }
5167
5168    lock.unlock();
5169    if (!skip_call)
5170        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
5171
5172    return result;
5173}
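// Worked example of the semaphore forward-progress checks above (handles assumed
// for illustration): if queue A signals semaphore S in one batch, a later wait
// on S (from A or any other queue) consumes the signal and records a
// SEMAPHORE_WAIT pointing back at A's seq. A second wait on S without an
// intervening signal triggers DRAWSTATE_QUEUE_FORWARD_PROGRESS, as does
// signaling S again while the first signal is still pending.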
5174
5175VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5176                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5177    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5178    VkResult result = my_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5179    // TODO : Track allocations and overall size here
5180    std::lock_guard<std::mutex> lock(global_lock);
5181    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5182    print_mem_list(my_data);
5183    return result;
5184}
5185
5186VKAPI_ATTR void VKAPI_CALL
5187FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5188    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5189
5190    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5191    // Before freeing a memory object, an application must ensure the memory object is no longer
5192    // in use by the device—for example by command buffers queued for execution. The memory need
5193    // not yet be unbound from all images and buffers, but any further use of those images or
5194    // buffers (on host or device) for anything other than destroying those objects will result in
5195    // undefined behavior.
5196
5197    std::unique_lock<std::mutex> lock(global_lock);
5198    bool skip_call = freeMemObjInfo(my_data, device, mem, false);
5199    print_mem_list(my_data);
5200    printCBList(my_data);
5201    lock.unlock();
5202    if (!skip_call) {
5203        my_data->dispatch_table.FreeMemory(device, mem, pAllocator);
5204    }
5205}
5206
5207// Validate that the given map memory range is valid. This means that the memory should not already be mapped,
5208//  and that the size of the map range should be:
5209//  1. Not zero
5210//  2. Within the size of the memory allocation
5211static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5212    bool skip_call = false;
5213
5214    if (size == 0) {
5215        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5216                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5217                            "VkMapMemory: Attempting to map memory range of size zero");
5218    }
5219
5220    auto mem_element = my_data->memObjMap.find(mem);
5221    if (mem_element != my_data->memObjMap.end()) {
5222        auto mem_info = mem_element->second.get();
5223        // It is an application error to call VkMapMemory on an object that is already mapped
5224        if (mem_info->mem_range.size != 0) {
5225            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5226                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5227                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
5228        }
5229
5230        // Validate that offset + size is within object's allocationSize
5231        if (size == VK_WHOLE_SIZE) {
5232            if (offset >= mem_info->alloc_info.allocationSize) {
5233                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5234                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5235                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
5236                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
5237                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
5238            }
5239        } else {
5240            if ((offset + size) > mem_info->alloc_info.allocationSize) {
5241                skip_call =
5242                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5243                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5244                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
5245                            size + offset, mem_info->alloc_info.allocationSize);
5246            }
5247        }
5248    }
5249    return skip_call;
5250}
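// Illustrative sketch (hypothetical application-side code): map requests that
// satisfy the range checks above for a 64 KiB allocation.
//
//     void *data = nullptr;
//     vkMapMemory(device, mem, 4096, 8192, 0, &data);          // ok: 4096 + 8192 <= 65536
//     // or, after unmapping, map the whole remainder of the allocation:
//     vkMapMemory(device, mem, 4096, VK_WHOLE_SIZE, 0, &data); // ok: offset < allocationSize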
5251
5252static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5253    auto mem_info = getMemObjInfo(my_data, mem);
5254    if (mem_info) {
5255        mem_info->mem_range.offset = offset;
5256        mem_info->mem_range.size = size;
5257    }
5258}
5259
5260static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5261    bool skip_call = false;
5262    auto mem_info = getMemObjInfo(my_data, mem);
5263    if (mem_info) {
5264        if (!mem_info->mem_range.size) {
5265            // Valid Usage: memory must currently be mapped
5266            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5267                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5268                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
5269        }
5270        mem_info->mem_range.size = 0;
5271        if (mem_info->shadow_copy) {
5272            free(mem_info->shadow_copy_base);
5273            mem_info->shadow_copy_base = 0;
5274            mem_info->shadow_copy = 0;
5275        }
5276    }
5277    return skip_call;
5278}
5279
5280// Guard value for pad data
5281static char NoncoherentMemoryFillValue = 0xb;
5282
5283static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5284                                     void **ppData) {
5285    auto mem_info = getMemObjInfo(dev_data, mem);
5286    if (mem_info) {
5287        mem_info->p_driver_data = *ppData;
5288        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
5289        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5290            mem_info->shadow_copy = 0;
5291        } else {
5292            if (size == VK_WHOLE_SIZE) {
5293                size = mem_info->alloc_info.allocationSize - offset;
5294            }
5295            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5296            assert(vk_safe_modulo(mem_info->shadow_pad_size,
5297                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5298            // Ensure start of mapped region reflects hardware alignment constraints
5299            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5300
5301            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5302            uint64_t start_offset = offset % map_alignment;
5303            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5304            mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
5305
5306            mem_info->shadow_copy =
5307                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5308                                         ~(map_alignment - 1)) + start_offset;
5309            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5310                                  map_alignment) == 0);
5311
5312            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
5313            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5314        }
5315    }
5316}
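// Rough layout of the shadow allocation built above (not to scale; start_offset
// is the sub-alignment portion of the map offset):
//
//   shadow_copy_base
//   |--alignment slack--|-- front pad --|-- user data (size) --|-- rear pad --|
//                       ^shadow_copy    ^*ppData (shadow_copy + shadow_pad_size)
//
// The region is filled with NoncoherentMemoryFillValue so that over- and
// under-writes by the app into either pad can be detected when mapped ranges
// are validated.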
5317
5318// Verify that the state of a fence being waited on is appropriate. That is,
5319//  the fence should have been submitted on a queue or during acquire next
5320//  image; waiting on a fence that was never submitted is flagged.
5321static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5322    bool skip_call = false;
5323
5324    auto pFence = getFenceNode(dev_data, fence);
5325    if (pFence) {
5326        if (pFence->state == FENCE_UNSIGNALED) {
5327            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5328                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5329                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5330                                 "acquire next image.",
5331                                 apiCall, reinterpret_cast<uint64_t &>(fence));
5332        }
5333    }
5334    return skip_call;
5335}
5336
5337static bool RetireFence(layer_data *dev_data, VkFence fence) {
5338    auto pFence = getFenceNode(dev_data, fence);
5339    if (pFence->signaler.first != VK_NULL_HANDLE) {
5340        /* Fence signaller is a queue -- use this as proof that prior operations
5341         * on that queue have completed.
5342         */
5343        return RetireWorkOnQueue(dev_data,
5344                                 getQueueNode(dev_data, pFence->signaler.first),
5345                                 pFence->signaler.second);
5346    }
5347    else {
5348        /* Fence signaller is the WSI. We're not tracking what the WSI op
5349         * actually /was/ in CV yet, but we need to mark the fence as retired.
5350         */
5351        pFence->state = FENCE_RETIRED;
5352        return false;
5353    }
5354}
5355
5356VKAPI_ATTR VkResult VKAPI_CALL
5357WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5358    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5359    bool skip_call = false;
5360    // Verify fence status of submitted fences
5361    std::unique_lock<std::mutex> lock(global_lock);
5362    for (uint32_t i = 0; i < fenceCount; i++) {
5363        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
5364    }
5365    lock.unlock();
5366    if (skip_call)
5367        return VK_ERROR_VALIDATION_FAILED_EXT;
5368
5369    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5370
5371    if (result == VK_SUCCESS) {
5372        lock.lock();
5373        // When we know that all fences are complete we can clean/remove their CBs
5374        if (waitAll || fenceCount == 1) {
5375            for (uint32_t i = 0; i < fenceCount; i++) {
5376                skip_call |= RetireFence(dev_data, pFences[i]);
5377            }
5378        }
5379        // NOTE : Alternate case not handled here is when some fences have completed. In
5380        //  this case for app to guarantee which fences completed it will have to call
5381        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5382        lock.unlock();
5383    }
5384    if (skip_call)
5385        return VK_ERROR_VALIDATION_FAILED_EXT;
5386    return result;
5387}
5388
5389VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
5390    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5391    bool skip_call = false;
5392    std::unique_lock<std::mutex> lock(global_lock);
5393    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5394    lock.unlock();
5395
5396    if (skip_call)
5397        return VK_ERROR_VALIDATION_FAILED_EXT;
5398
5399    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
5400    lock.lock();
5401    if (result == VK_SUCCESS) {
5402        skip_call |= RetireFence(dev_data, fence);
5403    }
5404    lock.unlock();
5405    if (skip_call)
5406        return VK_ERROR_VALIDATION_FAILED_EXT;
5407    return result;
5408}
5409
5410VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5411                                                            VkQueue *pQueue) {
5412    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5413    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5414    std::lock_guard<std::mutex> lock(global_lock);
5415
5416    // Add queue to tracking set only if it is new
5417    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
5419        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5420        pQNode->queue = *pQueue;
5421        pQNode->queueFamilyIndex = queueFamilyIndex;
5422        pQNode->seq = 0;
5423    }
5424}
5425
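// Waiting for a queue to idle retires everything currently submitted to it: seq plus the count
// of pending submissions is the sequence number the queue will have reached once it drains.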
5426VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5427    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5428    bool skip_call = false;
5429    std::unique_lock<std::mutex> lock(global_lock);
5430    auto pQueue = getQueueNode(dev_data, queue);
5431    skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
5432    lock.unlock();
5433    if (skip_call)
5434        return VK_ERROR_VALIDATION_FAILED_EXT;
5435    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
5436    return result;
5437}
5438
5439VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5440    bool skip_call = false;
5441    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5442    std::unique_lock<std::mutex> lock(global_lock);
    for (auto &queue : dev_data->queueMap) {
5444        skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5445    }
5446    lock.unlock();
5447    if (skip_call)
5448        return VK_ERROR_VALIDATION_FAILED_EXT;
5449    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
5450    return result;
5451}
5452
5453VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5454    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5455    bool skip_call = false;
5456    std::unique_lock<std::mutex> lock(global_lock);
5457    auto fence_pair = dev_data->fenceMap.find(fence);
5458    if (fence_pair != dev_data->fenceMap.end()) {
5459        if (fence_pair->second.state == FENCE_INFLIGHT) {
5460            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5461                                 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
5462                                 (uint64_t)(fence));
5463        }
5464        dev_data->fenceMap.erase(fence_pair);
5465    }
5466    lock.unlock();
5467
5468    if (!skip_call)
5469        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
5470}
5471
// For given obj node, if it is in use, flag a validation error and return the callback result, else return false
5473bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
5474                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
5475    if (dev_data->instance_state->disabled.object_in_use)
5476        return false;
5477    bool skip = false;
5478    if (obj_node->in_use.load()) {
5479        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
5480                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
5481                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
5482    }
5483    return skip;
5484}
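// Callers below pass the object's handle/type pair along with the UNIQUE_VALIDATION_ERROR_CODE
// for their entry point (e.g. VALIDATION_ERROR_00199 for vkDestroySemaphore).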
5485
5486VKAPI_ATTR void VKAPI_CALL
5487DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5488    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5489    bool skip = false;
5490    std::unique_lock<std::mutex> lock(global_lock);
5491    auto sema_node = getSemaphoreNode(dev_data, semaphore);
5492    if (sema_node) {
5493        skip |= ValidateObjectNotInUse(dev_data, sema_node,
5494                                       {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT},
5495                                       VALIDATION_ERROR_00199);
5496    }
5497    if (!skip) {
5498        dev_data->semaphoreMap.erase(semaphore);
5499        lock.unlock();
5500        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
5501    }
5502}
5503
5504VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5505    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5506    bool skip = false;
5507    std::unique_lock<std::mutex> lock(global_lock);
5508    auto event_node = getEventNode(dev_data, event);
5509    if (event_node) {
5510        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5511        skip |= ValidateObjectNotInUse(dev_data, event_node, obj_struct, VALIDATION_ERROR_00213);
5512        // Any bound cmd buffers are now invalid
5513        invalidateCommandBuffers(event_node->cb_bindings, obj_struct);
5514    }
5515    if (!skip) {
5516        dev_data->eventMap.erase(event);
5517        lock.unlock();
5518        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5519    }
5520}
5521
5522VKAPI_ATTR void VKAPI_CALL
5523DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5524    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5525    bool skip = false;
5526    std::unique_lock<std::mutex> lock(global_lock);
5527    auto qp_node = getQueryPoolNode(dev_data, queryPool);
5528    if (qp_node) {
5529        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5530        skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct, VALIDATION_ERROR_01012);
5531        // Any bound cmd buffers are now invalid
5532        invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
5533    }
5534    if (!skip) {
5535        dev_data->queryPoolMap.erase(queryPool);
5536        lock.unlock();
5537        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5538    }
5539}
5540
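// Validate each requested query against per-CB and global query state. A query may be:
//  available and in flight (an error unless its reset was guarded by waited events), unavailable
//  and in flight (an error unless WAIT or PARTIAL was requested and a pending CB will make it
//  available), unavailable and settled (an error), or uninitialized (an error).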
5541VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5542                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5543                                                   VkQueryResultFlags flags) {
5544    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5545    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5546    std::unique_lock<std::mutex> lock(global_lock);
5547    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5548        auto pCB = getCBNode(dev_data, cmdBuffer);
5549        for (auto queryStatePair : pCB->queryToStateMap) {
5550            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5551        }
5552    }
5553    bool skip_call = false;
5554    for (uint32_t i = 0; i < queryCount; ++i) {
5555        QueryObject query = {queryPool, firstQuery + i};
5556        auto queryElement = queriesInFlight.find(query);
5557        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        // Available and in flight
        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
            queryToStateElement->second) {
            for (auto cmdBuffer : queryElement->second) {
                auto pCB = getCBNode(dev_data, cmdBuffer);
                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                         (uint64_t)(queryPool), firstQuery + i);
                } else {
                    for (auto event : queryEventElement->second) {
                        dev_data->eventMap[event].needsSignaled = true;
                    }
                }
            }
            // Unavailable and in flight
        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
                   !queryToStateElement->second) {
            // TODO : Can there be the same query in use by multiple command buffers in flight?
            bool make_available = false;
            for (auto cmdBuffer : queryElement->second) {
                auto pCB = getCBNode(dev_data, cmdBuffer);
                make_available |= pCB->queryToStateMap[query];
            }
            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
            // Unavailable
        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                 (uint64_t)(queryPool), firstQuery + i);
            // Uninitialized
        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
5606    }
5607    lock.unlock();
5608    if (skip_call)
5609        return VK_ERROR_VALIDATION_FAILED_EXT;
5610    return dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
5611}
5612
5613static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5614    bool skip_call = false;
5615    auto buffer_node = getBufferNode(my_data, buffer);
5616    if (!buffer_node) {
5617        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5618                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5619                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5620    } else {
5621        if (buffer_node->in_use.load()) {
5622            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5623                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5624                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5625        }
5626    }
5627    return skip_call;
5628}
5629
5630// Return true if given ranges intersect, else false
5631// Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
5632//  in an error so not checking that here
// Padding is applied when comparing a linear and a non-linear range, using bufferImageGranularity
// In the padding case, if an alias is encountered then a validation error is reported and *skip_call
//  may be set by the callback function, so the caller should merge in the *skip_call value when the padding case is possible
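// Example (illustrative values): with bufferImageGranularity = 0x400, a linear range
//  [0x0, 0x3FF] and a non-linear range [0x400, 0x7FF] round to different 0x400-byte pages, so
//  they don't intersect; [0x0, 0x41F] vs. [0x400, 0x7FF] share page 0x400, intersect, and are
//  reported as MEMTRACK_INVALID_ALIASING since one is linear and the other is not.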
5636static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5637    *skip_call = false;
5638    auto r1_start = range1->start;
5639    auto r1_end = range1->end;
5640    auto r2_start = range2->start;
5641    auto r2_end = range2->end;
5642    VkDeviceSize pad_align = 1;
5643    if (range1->linear != range2->linear) {
5644        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5645    }
5646    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5647        return false;
5648    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5649        return false;
5650
5651    if (range1->linear != range2->linear) {
5652        // In linear vs. non-linear case, it's an error to alias
5653        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5654        const char *r1_type_str = range1->image ? "image" : "buffer";
5655        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5656        const char *r2_type_str = range2->image ? "image" : "buffer";
5657        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5658        *skip_call |=
5659            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5660                    "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5661                           " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5662                    r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5663    }
5664    // Ranges intersect
5665    return true;
5666}
5667// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5668static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5669    // Create a local MEMORY_RANGE struct to wrap offset/size
5670    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential validation-error case
5672    range_wrap.linear = range1->linear;
5673    range_wrap.start = offset;
5674    range_wrap.end = end;
5675    bool tmp_bool;
5676    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5677}
5678// For given mem_info, set all ranges valid that intersect [offset-end] range
5679// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5680static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5681    bool tmp_bool = false;
5682    MEMORY_RANGE map_range;
5683    map_range.linear = true;
5684    map_range.start = offset;
5685    map_range.end = end;
5686    for (auto &handle_range_pair : mem_info->bound_ranges) {
5687        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5688            // TODO : WARN here if tmp_bool true?
5689            handle_range_pair.second.valid = true;
5690        }
5691    }
5692}
5693// Object with given handle is being bound to memory w/ given mem_info struct.
5694//  Track the newly bound memory range with given memoryOffset
5695//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5696//  and non-linear range incorrectly overlap.
5697// Return true if an error is flagged and the user callback returns "true", otherwise false
5698// is_image indicates an image object, otherwise handle is for a buffer
5699// is_linear indicates a buffer or linear image
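// Note that range.end is inclusive: e.g. memoryOffset 0x1000 with size 0x100 covers [0x1000, 0x10FF]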
5700static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5701                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5702    bool skip_call = false;
5703    MEMORY_RANGE range;
5704
5705    range.image = is_image;
5706    range.handle = handle;
5707    range.linear = is_linear;
5708    range.valid = mem_info->global_valid;
5709    range.memory = mem_info->mem;
5710    range.start = memoryOffset;
5711    range.size = memRequirements.size;
5712    range.end = memoryOffset + memRequirements.size - 1;
5713    range.aliases.clear();
5714    // Update Memory aliasing
    // Save alias ranges so we can copy into the final map entry below. Can't do it in the loop b/c we don't yet have the final ptr. If we
5716    // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
5717    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5718    for (auto &obj_range_pair : mem_info->bound_ranges) {
5719        auto check_range = &obj_range_pair.second;
5720        bool intersection_error = false;
5721        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5722            skip_call |= intersection_error;
5723            range.aliases.insert(check_range);
5724            tmp_alias_ranges.insert(check_range);
5725        }
5726    }
5727    mem_info->bound_ranges[handle] = std::move(range);
5728    for (auto tmp_range : tmp_alias_ranges) {
5729        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5730    }
5731    if (is_image)
5732        mem_info->bound_images.insert(handle);
5733    else
5734        mem_info->bound_buffers.insert(handle);
5735
5736    return skip_call;
5737}
5738
5739static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5740                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5741    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5742}
5743
5744static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5745                                    VkMemoryRequirements mem_reqs) {
5746    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5747}
5748
// Remove the MEMORY_RANGE struct for the given handle from the bound_ranges of mem_info
//  is_image indicates if the handle is for an image or a buffer
//  This function also removes the handle from the appropriate bound_images/bound_buffers set
//  and cleans up any aliases of the range being removed.
5753static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5754    auto erase_range = &mem_info->bound_ranges[handle];
5755    for (auto alias_range : erase_range->aliases) {
5756        alias_range->aliases.erase(erase_range);
5757    }
5758    erase_range->aliases.clear();
5759    mem_info->bound_ranges.erase(handle);
5760    if (is_image) {
5761        mem_info->bound_images.erase(handle);
5762    } else {
5763        mem_info->bound_buffers.erase(handle);
5764    }
5765}
5766
5767static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5768
5769static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5770
5771VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5772                                         const VkAllocationCallbacks *pAllocator) {
5773    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5774    std::unique_lock<std::mutex> lock(global_lock);
5775    if (!validateIdleBuffer(dev_data, buffer)) {
5776        // Clean up memory binding and range information for buffer
5777        auto buff_node = getBufferNode(dev_data, buffer);
5778        if (buff_node) {
5779            // Any bound cmd buffers are now invalid
5780            invalidateCommandBuffers(buff_node->cb_bindings,
5781                                     {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5782            auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
5783            if (mem_info) {
5784                RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5785            }
5786            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5787            dev_data->bufferMap.erase(buff_node->buffer);
5788        }
5789        lock.unlock();
5790        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
5791    }
5792}
5793
5794static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
5795                                             VK_OBJECT *obj_struct) {
5796    if (dev_data->instance_state->disabled.destroy_buffer_view)
5797        return false;
5798    bool skip = false;
5799    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
5800    if (*buffer_view_state) {
5801        *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
5802        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
5803    }
5804    return skip;
5805}
5806
5807static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
5808                                            VK_OBJECT obj_struct) {
5809    // Any bound cmd buffers are now invalid
5810    invalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
5811    dev_data->bufferViewMap.erase(buffer_view);
5812}
5813
5814VKAPI_ATTR void VKAPI_CALL
5815DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5816    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5817    // Common data objects used pre & post call
5818    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
5819    VK_OBJECT obj_struct;
5820    std::unique_lock<std::mutex> lock(global_lock);
5821    // Validate state before calling down chain, update common data if we'll be calling down chain
5822    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
5823    if (!skip) {
5824        lock.unlock();
5825        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
5826        lock.lock();
5827        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
5828    }
5829}
5830
5831VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5832    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5833    bool skip = false;
5834    std::unique_lock<std::mutex> lock(global_lock);
5835    auto img_node = getImageNode(dev_data, image);
5836    if (img_node) {
5837        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
5838        // Any bound cmd buffers are now invalid
5839        invalidateCommandBuffers(img_node->cb_bindings, obj_struct);
5840        skip |= ValidateObjectNotInUse(dev_data, img_node, obj_struct, VALIDATION_ERROR_00743);
5841    }
    if (!skip) {
        if (img_node) {
            // Clean up memory mapping, bindings and range references for image
            auto mem_info = getMemObjInfo(dev_data, img_node->mem);
            if (mem_info) {
                RemoveImageMemoryRange(reinterpret_cast<uint64_t &>(image), mem_info);
                clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            }
            // Remove image from imageMap
            dev_data->imageMap.erase(img_node->image);
        }
5851
5852        const auto &subEntry = dev_data->imageSubresourceMap.find(image);
5853        if (subEntry != dev_data->imageSubresourceMap.end()) {
5854            for (const auto &pair : subEntry->second) {
5855                dev_data->imageLayoutMap.erase(pair);
5856            }
5857            dev_data->imageSubresourceMap.erase(subEntry);
5858        }
5859        lock.unlock();
5860        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
5861    }
5862}
5863
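// Verify that the allocation's memoryTypeIndex is one of the types the object supports.
// Example (illustrative): memoryTypeIndex 2 contributes bit (1 << 2) = 0x4, so
//  memory_type_bits 0x5 is compatible while 0x3 is not.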
static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
5866    bool skip_call = false;
5867    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5868        skip_call = log_msg(
5869            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5870            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5871            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5872            "type (0x%X) of this memory object 0x%" PRIx64 ".",
5873            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5874    }
5875    return skip_call;
5876}
5877
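// Record the buffer-to-memory binding, track/validate the bound range, and check memoryOffset
// against both VkMemoryRequirements::alignment and the device's min*BufferOffsetAlignment limits.
// Example (illustrative): memoryOffset 0x104 with a required alignment of 0x100 fails, since
//  0x104 % 0x100 != 0.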
5878VKAPI_ATTR VkResult VKAPI_CALL
5879BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5880    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5881    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5882    std::unique_lock<std::mutex> lock(global_lock);
5883    // Track objects tied to memory
5884    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5885    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5886    auto buffer_node = getBufferNode(dev_data, buffer);
5887    if (buffer_node) {
5888        VkMemoryRequirements memRequirements;
5889        dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &memRequirements);
5890        buffer_node->mem = mem;
5891        buffer_node->memOffset = memoryOffset;
5892        buffer_node->memSize = memRequirements.size;
5893
5894        // Track and validate bound memory range information
5895        auto mem_info = getMemObjInfo(dev_data, mem);
5896        if (mem_info) {
5897            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5898            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5899        }
5900
5901        // Validate memory requirements alignment
5902        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5903            skip_call |=
5904                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5905                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5906                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5907                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5908                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5909                        memoryOffset, memRequirements.alignment);
5910        }
5911
5912        // Validate device limits alignments
5913        static const VkBufferUsageFlagBits usage_list[3] = {
5914            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5915            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5916            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5917        static const char *memory_type[3] = {"texel",
5918                                             "uniform",
5919                                             "storage"};
5920        static const char *offset_name[3] = {
5921            "minTexelBufferOffsetAlignment",
5922            "minUniformBufferOffsetAlignment",
5923            "minStorageBufferOffsetAlignment"
5924        };
5925
        // Keep this array in sync with usage_list, memory_type, and offset_name above
5927        const VkDeviceSize offset_requirement[3] = {
5928            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5929            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5930            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5931        };
        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
5933
5934        for (int i = 0; i < 3; i++) {
5935            if (usage & usage_list[i]) {
5936                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5937                    skip_call |=
5938                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5939                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5940                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5941                                "device limit %s 0x%" PRIxLEAST64,
5942                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5943                }
5944            }
5945        }
5946    }
5947    print_mem_list(dev_data);
5948    lock.unlock();
5949    if (!skip_call) {
5950        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
5951    }
5952    return result;
5953}
5954
5955VKAPI_ATTR void VKAPI_CALL
5956GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5957    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5958    // TODO : What to track here?
5959    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5960    my_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5961}
5962
5963VKAPI_ATTR void VKAPI_CALL
5964GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5965    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5966    // TODO : What to track here?
5967    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5968    my_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
5969}
5970
5971static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
5972                                            VK_OBJECT *obj_struct) {
5973    if (dev_data->instance_state->disabled.destroy_image_view)
5974        return false;
5975    bool skip = false;
5976    *image_view_state = getImageViewState(dev_data, image_view);
5977    if (*image_view_state) {
5978        *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5979        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
5980    }
5981    return skip;
5982}
5983
5984static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
5985                                           VK_OBJECT obj_struct) {
5986    // Any bound cmd buffers are now invalid
5987    invalidateCommandBuffers(image_view_state->cb_bindings, obj_struct);
5988    dev_data->imageViewMap.erase(image_view);
5989}
5990
5991VKAPI_ATTR void VKAPI_CALL
5992DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5993    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5994    // Common data objects used pre & post call
5995    IMAGE_VIEW_STATE *image_view_state = nullptr;
5996    VK_OBJECT obj_struct;
5997    std::unique_lock<std::mutex> lock(global_lock);
5998    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
5999    if (!skip) {
6000        lock.unlock();
6001        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
6002        lock.lock();
6003        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
6004    }
6005}
6006
6007VKAPI_ATTR void VKAPI_CALL
6008DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6009    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6010
6011    std::unique_lock<std::mutex> lock(global_lock);
6012    my_data->shaderModuleMap.erase(shaderModule);
6013    lock.unlock();
6014
6015    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
6016}
6017
6018static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
6019                                           VK_OBJECT *obj_struct) {
6020    if (dev_data->instance_state->disabled.destroy_pipeline)
6021        return false;
6022    bool skip = false;
6023    *pipeline_state = getPipelineState(dev_data, pipeline);
6024    if (*pipeline_state) {
6025        *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
6026        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
6027    }
6028    return skip;
6029}
6030
6031static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
6032                                          VK_OBJECT obj_struct) {
6033    // Any bound cmd buffers are now invalid
6034    invalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct);
6035    dev_data->pipelineMap.erase(pipeline);
6036}
6037
6038VKAPI_ATTR void VKAPI_CALL
6039DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6040    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6041    PIPELINE_STATE *pipeline_state = nullptr;
6042    VK_OBJECT obj_struct;
6043    std::unique_lock<std::mutex> lock(global_lock);
6044    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
6045    if (!skip) {
6046        lock.unlock();
6047        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
6048        lock.lock();
6049        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
6050    }
6051}
6052
6053VKAPI_ATTR void VKAPI_CALL
6054DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6055    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6056    std::unique_lock<std::mutex> lock(global_lock);
6057    dev_data->pipelineLayoutMap.erase(pipelineLayout);
6058    lock.unlock();
6059
6060    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6061}
6062
6063VKAPI_ATTR void VKAPI_CALL
6064DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6065    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6066    bool skip = false;
6067    std::unique_lock<std::mutex> lock(global_lock);
6068    auto sampler_node = getSamplerNode(dev_data, sampler);
6069    if (sampler_node) {
6070        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
6071        skip |= ValidateObjectNotInUse(dev_data, sampler_node, obj_struct, VALIDATION_ERROR_00837);
6072        // Any bound cmd buffers are now invalid
6073        invalidateCommandBuffers(sampler_node->cb_bindings, obj_struct);
6074    }
6075    if (!skip) {
6076        dev_data->samplerMap.erase(sampler);
6077        lock.unlock();
6078        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
6079    }
6080}
6081
6082VKAPI_ATTR void VKAPI_CALL
6083DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6084    // TODO : Clean up any internal data structures using this obj.
6085    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6086        ->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6087}
6088
6089static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
6090                                                 DESCRIPTOR_POOL_NODE **desc_pool_state, VK_OBJECT *obj_struct) {
6091    if (dev_data->instance_state->disabled.destroy_descriptor_pool)
6092        return false;
6093    bool skip = false;
6094    *desc_pool_state = getPoolNode(dev_data, pool);
6095    if (*desc_pool_state) {
6096        *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
6097        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
6098    }
6099    return skip;
6100}
6101
6102static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
6103                                                DESCRIPTOR_POOL_NODE *desc_pool_state, VK_OBJECT obj_struct) {
6104    // Any bound cmd buffers are now invalid
6105    invalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct);
6106    // Free sets that were in this pool
6107    for (auto ds : desc_pool_state->sets) {
6108        freeDescriptorSet(dev_data, ds);
6109    }
6110    dev_data->descriptorPoolMap.erase(descriptorPool);
6111}
6112
6113VKAPI_ATTR void VKAPI_CALL
6114DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6115    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6116    DESCRIPTOR_POOL_NODE *desc_pool_state = nullptr;
6117    VK_OBJECT obj_struct;
6118    std::unique_lock<std::mutex> lock(global_lock);
6119    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
6120    if (!skip) {
6121        lock.unlock();
6122        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
6123        lock.lock();
6124        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
6125    }
6126}
6127// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
6128//  If this is a secondary command buffer, then make sure its primary is also in-flight
6129//  If primary is not in-flight, then remove secondary from global in-flight set
6130// This function is only valid at a point when cmdBuffer is being reset or freed
6131static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
6132    bool skip_call = false;
6133    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
6134        // Primary CB or secondary where primary is also in-flight is an error
6135        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
6136            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
6137            skip_call |= log_msg(
6138                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6139                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6140                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
6141                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
6142        }
6143    }
6144    return skip_call;
6145}
6146
6147// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
6148static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
6149    bool skip_call = false;
6150    for (auto cmd_buffer : pPool->commandBuffers) {
6151        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
6152            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
6153        }
6154    }
6155    return skip_call;
6156}
6157
6158static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
6159    for (auto cmd_buffer : pPool->commandBuffers) {
6160        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
6161    }
6162}
6163
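// Free command buffers in two passes: first verify that none are still in flight, then untrack,
// reset, and remove each one from the global CB map and from the owning pool's list.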
6164VKAPI_ATTR void VKAPI_CALL
6165FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6166    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6167    bool skip_call = false;
6168    std::unique_lock<std::mutex> lock(global_lock);
6169
6170    for (uint32_t i = 0; i < commandBufferCount; i++) {
6171        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before it can be freed
6173        if (cb_node) {
6174            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
6175        }
6176    }
6177
6178    if (skip_call)
6179        return;
6180
6181    auto pPool = getCommandPoolNode(dev_data, commandPool);
6182    for (uint32_t i = 0; i < commandBufferCount; i++) {
6183        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6184        // Delete CB information structure, and remove from commandBufferMap
6185        if (cb_node) {
6186            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
6187            // reset prior to delete for data clean-up
6188            resetCB(dev_data, cb_node->commandBuffer);
6189            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
6190            delete cb_node;
6191        }
6192
6193        // Remove commandBuffer reference from commandPoolMap
6194        pPool->commandBuffers.remove(pCommandBuffers[i]);
6195    }
6196    printCBList(dev_data);
6197    lock.unlock();
6198
6199    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6200}
6201
6202VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6203                                                 const VkAllocationCallbacks *pAllocator,
6204                                                 VkCommandPool *pCommandPool) {
6205    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6206
6207    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6208
6209    if (VK_SUCCESS == result) {
6210        std::lock_guard<std::mutex> lock(global_lock);
6211        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6212        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6213    }
6214    return result;
6215}
6216
6217VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6218                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6221    VkResult result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6222    if (result == VK_SUCCESS) {
6223        std::lock_guard<std::mutex> lock(global_lock);
6224        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
6225        qp_node->createInfo = *pCreateInfo;
6226    }
6227    return result;
6228}
6229
6230// Destroy commandPool along with all of the commandBuffers allocated from that pool
6231VKAPI_ATTR void VKAPI_CALL
6232DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6233    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6234    bool skip_call = false;
6235    std::unique_lock<std::mutex> lock(global_lock);
6236    // Verify that command buffers in pool are complete (not in-flight)
6237    auto pPool = getCommandPoolNode(dev_data, commandPool);
6238    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
6239
6240    if (skip_call)
6241        return;
6242    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
6243    clearCommandBuffersInFlight(dev_data, pPool);
6244    for (auto cb : pPool->commandBuffers) {
6245        clear_cmd_buf_and_mem_references(dev_data, cb);
6246        auto cb_node = getCBNode(dev_data, cb);
6247        // Remove references to this cb_node prior to delete
6248        // TODO : Need better solution here, resetCB?
6249        for (auto obj : cb_node->object_bindings) {
6250            removeCommandBufferBinding(dev_data, &obj, cb_node);
6251        }
6252        for (auto framebuffer : cb_node->framebuffers) {
6253            auto fb_state = getFramebufferState(dev_data, framebuffer);
6254            if (fb_state)
6255                fb_state->cb_bindings.erase(cb_node);
6256        }
6257        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6258        delete cb_node;                       // delete CB info structure
6259    }
6260    dev_data->commandPoolMap.erase(commandPool);
6261    lock.unlock();
6262
6263    dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
6264}
6265
6266VKAPI_ATTR VkResult VKAPI_CALL
6267ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6268    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6269    bool skip_call = false;
6270
6271    std::unique_lock<std::mutex> lock(global_lock);
6272    auto pPool = getCommandPoolNode(dev_data, commandPool);
6273    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
6274    lock.unlock();
6275
6276    if (skip_call)
6277        return VK_ERROR_VALIDATION_FAILED_EXT;
6278
6279    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
6280
6281    // Reset all of the CBs allocated from this pool
6282    if (VK_SUCCESS == result) {
6283        lock.lock();
6284        clearCommandBuffersInFlight(dev_data, pPool);
6285        for (auto cmdBuffer : pPool->commandBuffers) {
6286            resetCB(dev_data, cmdBuffer);
6287        }
6288        lock.unlock();
6289    }
6290    return result;
6291}
6292
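// A fence that is still in flight must not be reset; on success every reset fence returns to
// FENCE_UNSIGNALED.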
6293VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6294    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6295    bool skip_call = false;
6296    std::unique_lock<std::mutex> lock(global_lock);
6297    for (uint32_t i = 0; i < fenceCount; ++i) {
6298        auto pFence = getFenceNode(dev_data, pFences[i]);
6299        if (pFence && pFence->state == FENCE_INFLIGHT) {
6300            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6301                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6302                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
6303        }
6304    }
6305    lock.unlock();
6306
6307    if (skip_call)
6308        return VK_ERROR_VALIDATION_FAILED_EXT;
6309
6310    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
6311
6312    if (result == VK_SUCCESS) {
6313        lock.lock();
6314        for (uint32_t i = 0; i < fenceCount; ++i) {
6315            auto pFence = getFenceNode(dev_data, pFences[i]);
6316            if (pFence) {
6317                pFence->state = FENCE_UNSIGNALED;
6318            }
6319        }
6320        lock.unlock();
6321    }
6322
6323    return result;
6324}
6325
6326// For given cb_nodes, invalidate them and track object causing invalidation
6327void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
6328    for (auto cb_node : cb_nodes) {
6329        cb_node->state = CB_INVALID;
6330        cb_node->broken_bindings.push_back(obj);
6331    }
6332}
6333
6334static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
6335                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
6336    if (dev_data->instance_state->disabled.destroy_framebuffer)
6337        return false;
6338    bool skip = false;
6339    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
6340    if (*framebuffer_state) {
6341        *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
6342        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
6343    }
6344    return skip;
6345}
6346
6347static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
6348                                             VK_OBJECT obj_struct) {
6349    invalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct);
6350    dev_data->frameBufferMap.erase(framebuffer);
6351}
6352
6353VKAPI_ATTR void VKAPI_CALL
6354DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6355    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6356    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
6357    VK_OBJECT obj_struct;
6358    std::unique_lock<std::mutex> lock(global_lock);
6359    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
6360    if (!skip) {
6361        lock.unlock();
6362        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
6363        lock.lock();
6364        PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
6365    }
6366}
6367
6368VKAPI_ATTR void VKAPI_CALL
6369DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6370    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6371    bool skip = false;
6372    std::unique_lock<std::mutex> lock(global_lock);
6373    auto rp_state = getRenderPass(dev_data, renderPass);
6374    if (rp_state) {
6375        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
6376        skip |= ValidateObjectNotInUse(dev_data, rp_state, obj_struct, VALIDATION_ERROR_00393);
6377        // Any bound cmd buffers are now invalid
6378        invalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
6379    }
6380    if (!skip) {
6381        dev_data->renderPassMap.erase(renderPass);
6382        lock.unlock();
6383        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
6384    }
6385}
6386
6387VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6388                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6389    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6390    // TODO: Add check for VALIDATION_ERROR_00658
6391    // TODO: Add check for VALIDATION_ERROR_00666
6392    // TODO: Add check for VALIDATION_ERROR_00667
6393    // TODO: Add check for VALIDATION_ERROR_00668
6394    // TODO: Add check for VALIDATION_ERROR_00669
6395    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6396
6397    if (VK_SUCCESS == result) {
6398        std::lock_guard<std::mutex> lock(global_lock);
6399        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6400        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
6401    }
6402    return result;
6403}
6404
6405static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6406    bool skip_call = false;
6407    BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
6408    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6409    if (buf_node) {
6410        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
6411        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6412        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6413        skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
6414                                              VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
6415                                              false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6416    }
6417    return skip_call;
6418}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo))));
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects global_lock to be held by caller */

    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects global_lock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}
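// Worked example (illustrative only): for an image created with mipLevels = 10 and
// arrayLayers = 6, a range of {baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS} resolves to
// levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5, i.e. mips 2..9 and layers 1..5.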

static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image);
    if (image_node) {
        skip_call |= ValidateImageUsageFlags(
            dev_data, image_node, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                                      VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            false, "vkCreateImageView()",
            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_node, "vkCreateImageView()");
    }
    return skip_call;
}

static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView view) {
    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, pCreateInfo));
    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, pCreateInfo->image);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// Utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any enabled attachment uses a constant blend factor, flag the pipeline as needing blend constants
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
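// Illustrative sketch (not part of the layer): an attachment state that would set
// blendConstantsEnabled above, because its dstColorBlendFactor falls in the
// [CONSTANT_COLOR, ONE_MINUS_CONSTANT_ALPHA] enum range and therefore consumes the
// constants set by vkCmdSetBlendConstants():
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable         = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;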

VKAPI_ATTR VkResult VKAPI_CALL
CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                        VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skip_call = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pPipeState(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

    for (i = 0; i < count; i++) {
        pPipeState[i] = new PIPELINE_STATE;
        pPipeState[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pPipeState[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
        pPipeState[i]->pipeline_layout = *getPipelineStateLayout(dev_data, pCreateInfos[i].layout);

        skip_call |= verifyPipelineCreateState(dev_data, device, pPipeState, i);
    }

    if (!skip_call) {
        lock.unlock();
        result =
            dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeState[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                       VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    bool skip_call = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pPipeState(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeState[i] = new PIPELINE_STATE;
        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeState[i]->pipeline_layout = *getPipelineStateLayout(dev_data, pCreateInfos[i].layout);
        // memcpy(&pPipeState[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
                                                dev_data->shaderModuleMap);
        // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeState[i]);
    }

    if (!skip_call) {
        lock.unlock();
        result =
            dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeState[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] =
            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_state->disabled.push_constant_range)
        return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip_call = false;
    // Check that offset + size doesn't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    // TODO : This check combines VALIDATION_ERROR_00877 & 880, need to break out separately
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u and size %u that "
                                                      "exceeds this device's maxPushConstantsSize of %u. %s",
                        caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                       "exceeds this device's maxPushConstantsSize of %u.",
                                 caller_name, offset, size, maxPushConstantsSize);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    // TODO : This check combines VALIDATION_ERROR_00878 & 879, need to break out separately
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
                                                               "size %u. Size must be greater than zero and a multiple of 4. %s",
                                 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, size);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                                       "offset %u. Offset must be a multiple of 4.",
                                 caller_name, index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                                       "offset %u. Offset must be a multiple of 4.",
                                 caller_name, offset);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip_call;
}
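// Worked examples (illustrative only), assuming maxPushConstantsSize = 128:
//     offset = 0,  size = 64   -> valid
//     offset = 64, size = 128  -> rejected above, since size (128) > 128 - offset (64);
//                                 testing in that order avoids unsigned overflow
//     offset = 0,  size = 6    -> rejected: size must be a non-zero multiple of 4
//     offset = 2,  size = 8    -> rejected: offset must be a multiple of 4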

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : Add checks for VALIDATION_ERRORS 865-871
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
        }
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    // Each range has been validated individually; now check for overlap between ranges.
    // There's no explicit Valid Usage language against overlap, so issue a warning instead of an error.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
                            i, minA, maxA, j, minB, maxB);
            }
        }
    }
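    // Worked example (illustrative only): half-open ranges 0:[0, 32) and 1:[16, 48)
    // overlap because minA(0) <= minB(16) and maxA(32) > minB(16); ranges [0, 32) and
    // [32, 64) do not overlap, since they only touch at offset 32.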

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                     VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Log creation of the new pool, then record it in the descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO : Anything to do if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_00928
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    if (dev_data->instance_state->disabled.allocate_descriptor_sets)
        return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}
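// Illustrative sketch (application side, not part of the layer): an allocation that the
// validation above checks against the pool's remaining sets and per-type descriptor counts.
// The pool, layouts, and sets names are assumed to exist in the caller:
//
//     VkDescriptorSetAllocateInfo alloc_info = {};
//     alloc_info.sType              = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
//     alloc_info.descriptorPool     = pool;     // needs >= 2 free sets remaining
//     alloc_info.descriptorSetCount = 2;
//     alloc_info.pSetLayouts        = layouts;  // pool also needs enough free descriptors
//                                               // of each type these layouts require
//     vkAllocateDescriptorSets(device, &alloc_info, sets);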

VKAPI_ATTR VkResult VKAPI_CALL
AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_state->disabled.free_descriptor_sets)
        return false;
    bool skip_call = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i)
        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");

    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                             validation_error_map[VALIDATION_ERROR_00922]);
    }
    return skip_call;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        auto set_state = dev_data->setMap[descriptor_sets[i]];
        uint32_t type_index = 0, descriptor_count = 0;
        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
        }
        freeDescriptorSet(dev_data, set_state);
        pool_state->sets.erase(set_state);
    }
}
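// Worked example (illustrative only): freeing one set whose layout has bindings for
// 3 x UNIFORM_BUFFER and 1 x COMBINED_IMAGE_SAMPLER bumps availableSets by 1,
// availableDescriptorTypeCount[UNIFORM_BUFFER] by 3, and
// availableDescriptorTypeCount[COMBINED_IMAGE_SAMPLER] by 1.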

VKAPI_ATTR VkResult VKAPI_CALL
FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a proof-of-concept for the core validation architecture.
//  Really we'll want to break these functions out into separate files, but
//  we're keeping it all together here to prove out the design.
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_state->disabled.update_descriptor_sets)
        return false;
    // First thing to do is perform map look-ups.
    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
    //  so we can't just do a single map look-up up-front, but do them individually in functions below

    // Now make call(s) that validate state, but don't perform state updates in this function
    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
    //  namespace which will parse params and make calls into specific class instances
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL
UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                         pDescriptorCopies);
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        printCBList(dev_data);
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    fb_state->cb_bindings.insert(cb_state);
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
        auto rp_state = getRenderPass(dev_data, fb_state->createInfo.renderPass);
        if (rp_state) {
            addCommandBufferBinding(
                &rp_state->cb_bindings,
                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
                        "You must check CB fence before this call.",
                        commandBuffer);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
                            "valid framebuffer parameter is specified.",
                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
                        if (framebuffer) {
                            if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                                !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                                 getRenderPass(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                                 errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
                                skip_call |= log_msg(
                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                    __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                                    "vkBeginCommandBuffer(): Secondary Command "
                                    "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                    "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                                    reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                    reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                    reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
                            }
                            // Connect this framebuffer and its children to this cmdBuffer
                            AddFramebufferBinding(dev_data, cb_node, framebuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                         __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
                                         "support precise occlusion queries.",
                                         reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
                            "that is less than the number of subpasses (%d).",
                            (void *)commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->cmds.back().type)) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = getCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command buffer and inheriting, update the items we should inherit.
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = getRenderPass(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    } else {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
    }
    lock.unlock();
    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}
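// Illustrative sketch (application side, not part of the layer): beginning a secondary
// command buffer in a way that passes the checks above. With RENDER_PASS_CONTINUE_BIT,
// inheritance info must name a valid renderPass and an in-range subpass; the framebuffer
// is optional but recommended for performance. Handle names are assumed to exist:
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType       = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass  = render_pass;   // must be compatible with the framebuffer's
//     inherit.subpass     = 0;             // must be < the render pass's subpassCount
//     inherit.framebuffer = framebuffer;   // or VK_NULL_HANDLE (warning only)
//
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType            = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags            = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);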

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
        }
        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
                                 (uint64_t)(query.pool), query.index);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        if (VK_SUCCESS == result) {
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
            printCB(dev_data, commandBuffer);
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = getCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
    }
    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
        }

        PIPELINE_STATE *pPN = getPipelineState(dev_data, pipeline);
        if (pPN) {
            pCB->lastBound[pipelineBindPoint].pipeline_state = pPN;
            set_cb_pso_status(pCB, pPN);
            set_pipeline_state(pPN);
            // Only record the binding for a pipeline we actually track; dereferencing the
            // result of getPipelineState() unconditionally would crash on an unknown pipeline
            addCommandBufferBinding(&pPN->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
                                 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
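// Worked example (illustrative only): firstViewport = 1, viewportCount = 2 gives
// ((1u << 2) - 1u) << 1 == 0b110, marking viewports 1 and 2 as set in the mask.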

VKAPI_ATTR void VKAPI_CALL
CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                 "vkCmdSetLineWidth() called but the bound pipeline was created without the "
                                 "VK_DYNAMIC_STATE_LINE_WIDTH flag, so the new line width may be ignored.");
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}
7359
7360VKAPI_ATTR void VKAPI_CALL
7361CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7362    bool skip_call = false;
7363    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7364    std::unique_lock<std::mutex> lock(global_lock);
7365    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7366    if (pCB) {
7367        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7368        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7369    }
7370    lock.unlock();
7371    if (!skip_call)
7372        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
7373}
7374
7375VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7376    bool skip_call = false;
7377    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7378    std::unique_lock<std::mutex> lock(global_lock);
7379    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7380    if (pCB) {
7381        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7382        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7383    }
7384    lock.unlock();
7385    if (!skip_call)
7386        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
7387}
7388
7389VKAPI_ATTR void VKAPI_CALL
7390CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7391    bool skip_call = false;
7392    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7393    std::unique_lock<std::mutex> lock(global_lock);
7394    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7395    if (pCB) {
7396        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7397        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7398    }
7399    lock.unlock();
7400    if (!skip_call)
7401        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7402}
7403
7404VKAPI_ATTR void VKAPI_CALL
7405CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7406    bool skip_call = false;
7407    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7408    std::unique_lock<std::mutex> lock(global_lock);
7409    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7410    if (pCB) {
7411        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7412        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7413    }
7414    lock.unlock();
7415    if (!skip_call)
7416        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7417}
7418
7419VKAPI_ATTR void VKAPI_CALL
7420CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7421    bool skip_call = false;
7422    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7423    std::unique_lock<std::mutex> lock(global_lock);
7424    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7425    if (pCB) {
7426        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7427        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7428    }
7429    lock.unlock();
7430    if (!skip_call)
7431        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7432}
7433
7434VKAPI_ATTR void VKAPI_CALL
7435CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7436    bool skip_call = false;
7437    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7438    std::unique_lock<std::mutex> lock(global_lock);
7439    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7440    if (pCB) {
7441        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7442        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7443    }
7444    lock.unlock();
7445    if (!skip_call)
7446        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
7447}
7448
7449VKAPI_ATTR void VKAPI_CALL
7450CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7451                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7452                      const uint32_t *pDynamicOffsets) {
7453    bool skip_call = false;
7454    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7455    std::unique_lock<std::mutex> lock(global_lock);
7456    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7457    if (pCB) {
7458        if (pCB->state == CB_RECORDING) {
7459            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7460            uint32_t totalDynamicDescriptors = 0;
7461            string errorString = "";
7462            uint32_t lastSetIndex = firstSet + setCount - 1;
7463            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
7464                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7465                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7466            }
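            // Worked example (added note): firstSet = 2, setCount = 3 gives lastSetIndex = 4, so
            // both vectors grow to five entries; oldFinalBoundSet below remembers what set #4 held
            // before this bind so the compatibility checks further down can compare against it.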
7467            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7468            auto pipeline_layout = getPipelineStateLayout(dev_data, layout);
7469            for (uint32_t i = 0; i < setCount; i++) {
7470                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7471                if (pSet) {
7472                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
7473                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
7474                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7475                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7476                                         DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
7477                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7478                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
7479                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7480                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7481                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7482                                             "DS 0x%" PRIxLEAST64
7483                                             " bound but it was never updated. You may want to either update it or not bind it.",
7484                                             (uint64_t)pDescriptorSets[i]);
7485                    }
7486                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7487                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
7488                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7489                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7490                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7491                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7492                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
7493                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7494                    }
7495
7496                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
7497
7498                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7499
7500                    if (setDynamicDescriptorCount) {
7501                        // First make sure we won't overstep bounds of pDynamicOffsets array
7502                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7503                            skip_call |=
7504                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7505                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7506                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7507                                        "descriptorSet #%u (0x%" PRIxLEAST64
7508                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7509                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7510                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
7511                                        (dynamicOffsetCount - totalDynamicDescriptors));
7512                        } else { // Validate and store dynamic offsets with the set
7513                            // Validate Dynamic Offset Minimums
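                            // Worked example (added note): with a minUniformBufferOffsetAlignment
                            // of 256, each dynamic uniform-buffer offset must be 0, 256, 512, ...;
                            // an offset of 128 fails the vk_safe_modulo() check below.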
7514                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7515                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
7516                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7517                                    if (vk_safe_modulo(
7518                                            pDynamicOffsets[cur_dyn_offset],
7519                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7520                                        skip_call |= log_msg(
7521                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7522                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7523                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7525                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
7526                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7527                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7528                                    }
7529                                    cur_dyn_offset++;
7530                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7531                                    if (vk_safe_modulo(
7532                                            pDynamicOffsets[cur_dyn_offset],
7533                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7534                                        skip_call |= log_msg(
7535                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7536                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7537                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7539                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
7540                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7541                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7542                                    }
7543                                    cur_dyn_offset++;
7544                                }
7545                            }
7546
7547                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7548                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7549                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7550                            // Keep running total of dynamic descriptor count to verify at the end
7551                            totalDynamicDescriptors += setDynamicDescriptorCount;
7552
7553                        }
7554                    }
7555                } else {
7556                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7557                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7558                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
7559                                         (uint64_t)pDescriptorSets[i]);
7560                }
7561                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7562                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7563                if (firstSet > 0) { // Check set #s below the first bound set
7564                    for (uint32_t i = 0; i < firstSet; ++i) {
7565                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7566                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7567                                                             pipeline_layout, i, errorString)) {
7568                            skip_call |= log_msg(
7569                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7570                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7571                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
7573                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7574                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7575                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7576                        }
7577                    }
7578                }
7579                // Check if newly last bound set invalidates any remaining bound sets
7580                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7581                    if (oldFinalBoundSet &&
7582                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7583                        auto old_set = oldFinalBoundSet->GetSet();
7584                        skip_call |=
7585                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7586                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7588                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7589                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7590                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7591                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7592                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7593                                    lastSetIndex + 1, (uint64_t)layout);
7594                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7595                    }
7596                }
7597            }
7598            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7599            if (totalDynamicDescriptors != dynamicOffsetCount) {
7600                skip_call |=
7601                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7602                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7603                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7604                            "is %u. It should exactly match the number of dynamic descriptors.",
7605                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
7606            }
7607        } else {
7608            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7609        }
7610    }
7611    lock.unlock();
7612    if (!skip_call)
7613        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7614                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7615}
7616
7617VKAPI_ATTR void VKAPI_CALL
7618CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7619    bool skip_call = false;
7620    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7621    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7622    std::unique_lock<std::mutex> lock(global_lock);
7623
7624    auto buff_node = getBufferNode(dev_data, buffer);
7625    auto cb_node = getCBNode(dev_data, commandBuffer);
7626    if (cb_node && buff_node) {
7627        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7628        std::function<bool()> function = [=]() {
7629            return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7630        };
7631        cb_node->validate_functions.push_back(function);
7632        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7633        VkDeviceSize offset_align = 0;
7634        switch (indexType) {
7635        case VK_INDEX_TYPE_UINT16:
7636            offset_align = 2;
7637            break;
7638        case VK_INDEX_TYPE_UINT32:
7639            offset_align = 4;
7640            break;
7641        default:
7642            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7643            break;
7644        }
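        // Worked example (added note): VK_INDEX_TYPE_UINT16 requires 2-byte alignment, so an
        // offset of 5 fails (5 % 2 != 0) while an offset of 6 passes.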
7645        if (!offset_align || (offset % offset_align)) {
7646            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7647                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7648                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7649                                 offset, string_VkIndexType(indexType));
7650        }
7651        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7652    } else {
7653        assert(0);
7654    }
7655    lock.unlock();
7656    if (!skip_call)
7657        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7658}
7659
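// Record the vertex buffers bound at [firstBinding, firstBinding + bindingCount) in the command
// buffer's current draw data so draw-time validation can see which buffers a draw consumes.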
7660void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7661    uint32_t end = firstBinding + bindingCount;
7662    if (pCB->currentDrawData.buffers.size() < end) {
7663        pCB->currentDrawData.buffers.resize(end);
7664    }
7665    for (uint32_t i = 0; i < bindingCount; ++i) {
7666        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7667    }
7668}
7669
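// Snapshot the current draw data once per draw so submit-time validation can replay the exact
// buffer bindings each recorded draw used.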
7670static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7671
7672VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7673                                                uint32_t bindingCount, const VkBuffer *pBuffers,
7674                                                const VkDeviceSize *pOffsets) {
7675    bool skip_call = false;
7676    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7677    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7678    std::unique_lock<std::mutex> lock(global_lock);
7679
7680    auto cb_node = getCBNode(dev_data, commandBuffer);
7681    if (cb_node) {
7682        for (uint32_t i = 0; i < bindingCount; ++i) {
7683            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
7684            assert(buff_node);
7685            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7686            std::function<bool()> function = [=]() {
7687                return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7688            };
7689            cb_node->validate_functions.push_back(function);
7690        }
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7692        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7693    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7695    }
7696    lock.unlock();
7697    if (!skip_call)
7698        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7699}
7700
7701/* expects global_lock to be held by caller */
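// Queue deferred callbacks that mark every image and buffer this command buffer writes through
// storage descriptors as containing valid memory once the recorded commands actually execute.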
7702static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7703    bool skip_call = false;
7704
7705    for (auto imageView : pCB->updateImages) {
7706        auto view_state = getImageViewState(dev_data, imageView);
7707        if (!view_state)
7708            continue;
7709
7710        auto img_node = getImageNode(dev_data, view_state->create_info.image);
7711        assert(img_node);
7712        std::function<bool()> function = [=]() {
7713            SetImageMemoryValid(dev_data, img_node, true);
7714            return false;
7715        };
7716        pCB->validate_functions.push_back(function);
7717    }
7718    for (auto buffer : pCB->updateBuffers) {
7719        auto buff_node = getBufferNode(dev_data, buffer);
7720        assert(buff_node);
7721        std::function<bool()> function = [=]() {
7722            SetBufferMemoryValid(dev_data, buff_node, true);
7723            return false;
7724        };
7725        pCB->validate_functions.push_back(function);
7726    }
7727    return skip_call;
7728}
7729
7730VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7731                                   uint32_t firstVertex, uint32_t firstInstance) {
7732    bool skip_call = false;
7733    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7734    std::unique_lock<std::mutex> lock(global_lock);
7735    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7736    if (pCB) {
7737        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7738        pCB->drawCount[DRAW]++;
7739        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
7740        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7741        // TODO : Need to pass commandBuffer as srcObj here
7742        skip_call |=
7743            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7744                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
7745        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7746        if (!skip_call) {
7747            updateResourceTrackingOnDraw(pCB);
7748        }
7749        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7750    }
7751    lock.unlock();
7752    if (!skip_call)
7753        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7754}
7755
7756VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7757                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                          uint32_t firstInstance) {
7759    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7760    bool skip_call = false;
7761    std::unique_lock<std::mutex> lock(global_lock);
7762    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7763    if (pCB) {
7764        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7765        pCB->drawCount[DRAW_INDEXED]++;
7766        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
7767        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7768        // TODO : Need to pass commandBuffer as srcObj here
7769        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7770                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7771                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7772        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7773        if (!skip_call) {
7774            updateResourceTrackingOnDraw(pCB);
7775        }
7776        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7777    }
7778    lock.unlock();
7779    if (!skip_call)
7780        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
7781}
7782
7783VKAPI_ATTR void VKAPI_CALL
7784CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7785    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7786    bool skip_call = false;
7787    std::unique_lock<std::mutex> lock(global_lock);
7788
7789    auto cb_node = getCBNode(dev_data, commandBuffer);
7790    auto buff_node = getBufferNode(dev_data, buffer);
7791    if (cb_node && buff_node) {
7792        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
7793        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7794        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7795        cb_node->drawCount[DRAW_INDIRECT]++;
7796        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
7797        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7798        // TODO : Need to pass commandBuffer as srcObj here
7799        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7800                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7801                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7802        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7803        if (!skip_call) {
7804            updateResourceTrackingOnDraw(cb_node);
7805        }
7806        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
7807    } else {
7808        assert(0);
7809    }
7810    lock.unlock();
7811    if (!skip_call)
7812        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7813}
7814
7815VKAPI_ATTR void VKAPI_CALL
7816CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7817    bool skip_call = false;
7818    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7819    std::unique_lock<std::mutex> lock(global_lock);
7820
7821    auto cb_node = getCBNode(dev_data, commandBuffer);
7822    auto buff_node = getBufferNode(dev_data, buffer);
7823    if (cb_node && buff_node) {
7824        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
7825        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7826        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7827        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
7828        skip_call |=
7829            validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
7830        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7831        // TODO : Need to pass commandBuffer as srcObj here
7832        skip_call |=
7833            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7834                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
7835                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7836        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7837        if (!skip_call) {
7838            updateResourceTrackingOnDraw(cb_node);
7839        }
7840        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
7841    } else {
7842        assert(0);
7843    }
7844    lock.unlock();
7845    if (!skip_call)
7846        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7847}
7848
7849VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7850    bool skip_call = false;
7851    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7852    std::unique_lock<std::mutex> lock(global_lock);
7853    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7854    if (pCB) {
7855        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
7856        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7857        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7858        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7859    }
7860    lock.unlock();
7861    if (!skip_call)
7862        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
7863}
7864
7865VKAPI_ATTR void VKAPI_CALL
7866CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7867    bool skip_call = false;
7868    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7869    std::unique_lock<std::mutex> lock(global_lock);
7870
7871    auto cb_node = getCBNode(dev_data, commandBuffer);
7872    auto buff_node = getBufferNode(dev_data, buffer);
7873    if (cb_node && buff_node) {
7874        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
7875        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7876        skip_call |=
7877            validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
7878        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7879        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7880        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
7881    }
7882    lock.unlock();
7883    if (!skip_call)
7884        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
7885}
7886
7887VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7888                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
7889    bool skip_call = false;
7890    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7891    std::unique_lock<std::mutex> lock(global_lock);
7892
7893    auto cb_node = getCBNode(dev_data, commandBuffer);
7894    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
7895    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
7896    if (cb_node && src_buff_node && dst_buff_node) {
7897        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7898        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
7899        // Update bindings between buffers and cmd buffer
7900        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
7901        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
7902        // Validate that SRC & DST buffers have correct usage flags set
7903        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
7904                                              "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7905        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
7906                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7907
7908        std::function<bool()> function = [=]() {
7909            return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7910        };
7911        cb_node->validate_functions.push_back(function);
7912        function = [=]() {
7913            SetBufferMemoryValid(dev_data, dst_buff_node, true);
7914            return false;
7915        };
7916        cb_node->validate_functions.push_back(function);
7917
7918        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7919        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
7920    } else {
7921        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
7922        assert(0);
7923    }
7924    lock.unlock();
7925    if (!skip_call)
7926        dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7927}
7928
7929static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
7930                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
7931    bool skip_call = false;
7932
7933    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7934        uint32_t layer = i + subLayers.baseArrayLayer;
7935        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7936        IMAGE_CMD_BUF_LAYOUT_NODE node;
7937        if (!FindLayout(cb_node, srcImage, sub, node)) {
7938            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7939            continue;
7940        }
7941        if (node.layout != srcImageLayout) {
7942            // TODO: Improve log message in the next pass
7943            skip_call |=
7944                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot copy from an image whose source layout is %s when its current layout is %s.",
7947                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7948        }
7949    }
7950    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7951        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7952            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_node = getImageNode(dev_data, srcImage);
            // Guard against a null node in case the image handle is unknown to the layer.
            if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
7955                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7956                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7957                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7958                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7959            }
7960        } else {
7961            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7962                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7963                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7964                                 string_VkImageLayout(srcImageLayout));
7965        }
7966    }
7967    return skip_call;
7968}
7969
7970static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
7971                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
7972    bool skip_call = false;
7973
7974    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7975        uint32_t layer = i + subLayers.baseArrayLayer;
7976        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7977        IMAGE_CMD_BUF_LAYOUT_NODE node;
7978        if (!FindLayout(cb_node, destImage, sub, node)) {
7979            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7980            continue;
7981        }
7982        if (node.layout != destImageLayout) {
7983            skip_call |=
7984                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot copy to an image whose dest layout is %s when its current layout is %s.",
7987                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7988        }
7989    }
7990    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7991        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_node = getImageNode(dev_data, destImage);
            // Guard against a null node in case the image handle is unknown to the layer.
            if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
7994                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7995                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7996                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7997                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7998            }
7999        } else {
8000            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8001                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
8002                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
8003                                 string_VkImageLayout(destImageLayout));
8004        }
8005    }
8006    return skip_call;
8007}
8008
8009// Test if two VkExtent3D structs are equivalent
8010static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
8011    bool result = true;
8012    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
8013        (extent->depth != other_extent->depth)) {
8014        result = false;
8015    }
8016    return result;
8017}
8018
8019// Returns the image extent of a specific subresource.
8020static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_NODE *img, const VkImageSubresourceLayers *subresource) {
8021    const uint32_t mip = subresource->mipLevel;
8022    VkExtent3D extent = img->createInfo.extent;
8023    extent.width = std::max(1U, extent.width >> mip);
8024    extent.height = std::max(1U, extent.height >> mip);
8025    extent.depth = std::max(1U, extent.depth >> mip);
8026    return extent;
8027}
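// Worked example for GetImageSubresourceExtent() (added note): a 32x32x1 image at mipLevel 3
// yields max(1, 32 >> 3) = 4 in both width and height, i.e. a 4x4x1 extent.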
8028
8029// Test if the extent argument has all dimensions set to 0.
8030static inline bool IsExtentZero(const VkExtent3D *extent) {
8031    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
8032}
8033
8034// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
8035static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_NODE *img) {
8036    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
8037    VkExtent3D granularity = { 0, 0, 0 };
8038    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
8039    if (pPool) {
8040        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
8041        if (vk_format_is_compressed(img->createInfo.format)) {
8042            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
8043            granularity.width *= block_size.width;
8044            granularity.height *= block_size.height;
8045        }
8046    }
8047    return granularity;
8048}
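// Worked example for GetScaledItg() (added note): for a 4x4 compressed block format such as BC1,
// a reported granularity of (1, 1, 1) is scaled to (4, 4, 1); depth is deliberately left unscaled.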
8049
8050// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
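// Note (added): vk_safe_modulo() is assumed to return 0 for a zero divisor, so a (0, 0, 0)
// granularity trivially passes here; callers handle that case separately via IsExtentZero().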
8051static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
8052    bool valid = true;
8053    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
8054        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
8055        valid = false;
8056    }
8057    return valid;
8058}
8059
8060// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
8061static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
8062                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
8063    bool skip = false;
8064    VkExtent3D offset_extent = {};
8065    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
8066    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
8067    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
8068    if (IsExtentZero(granularity)) {
8069        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
8070        if (IsExtentZero(&offset_extent) == false) {
8071            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8072                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8073                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
8074                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8075                            function, i, member, offset->x, offset->y, offset->z);
8076        }
8077    } else {
8078        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
8079        // integer multiples of the image transfer granularity.
8080        if (IsExtentAligned(&offset_extent, granularity) == false) {
8081            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8082                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8083                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
8084                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
8085                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
8086                            granularity->depth);
8087        }
8088    }
8089    return skip;
8090}
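// Worked example for CheckItgOffset() (added note): with granularity (4, 4, 1), an offset of
// (4, 8, 0) is accepted, while (2, 4, 0) is rejected because 2 is not a multiple of 4.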
8091
8092// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
8093static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
8094                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
8095                                  const uint32_t i, const char *function, const char *member) {
8096    bool skip = false;
8097    if (IsExtentZero(granularity)) {
8098        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
8099        // subresource extent.
8100        if (IsExtentEqual(extent, subresource_extent) == false) {
8101            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8102                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8103                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
8104                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8105                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
8106                            subresource_extent->height, subresource_extent->depth);
8107        }
8108    } else {
8109        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
8110        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
8111        // subresource extent dimensions.
8112        VkExtent3D offset_extent_sum = {};
8113        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
8114        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
8115        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
8116        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
8117            skip |=
8118                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8119                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8120                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
8121                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
8122                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
8123                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
8124                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
8125                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
8126        }
8127    }
8128    return skip;
8129}
8130
8131// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
8132static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
8133                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8134    bool skip = false;
8135    if (vk_safe_modulo(value, granularity) != 0) {
8136        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8137                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8138                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
8139                        "transfer granularity width (%d).",
8140                        function, i, member, value, granularity);
8141    }
8142    return skip;
8143}
8144
8145// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
8146static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
8147                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8148    bool skip = false;
8149    if (vk_safe_modulo(value, granularity) != 0) {
8150        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8151                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8152                        "%s: pRegion[%d].%s (%" PRIdLEAST64
8153                        ") must be an even integer multiple of this command buffer's queue family image transfer "
8154                        "granularity width (%d).",
8155                        function, i, member, value, granularity);
8156    }
8157    return skip;
8158}
8159
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
8161static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8162                                                                    const IMAGE_NODE *img, const VkImageCopy *region,
8163                                                                    const uint32_t i, const char *function) {
8164    bool skip = false;
8165    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8166    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8167    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8168    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8169    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8170                           "extent");
8171    return skip;
8172}
8173
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8175static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8176                                                                          const IMAGE_NODE *img, const VkBufferImageCopy *region,
8177                                                                          const uint32_t i, const char *function) {
8178    bool skip = false;
8179    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8180    skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8181    skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
    // bufferImageHeight is a row count, so check it against the granularity height rather than its width.
    skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.height, i, function, "bufferImageHeight");
8183    skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8184    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8185    skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8186                           function, "imageExtent");
8187    return skip;
8188}
8189
VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImage()"); };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
                                                                          "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions);
}

// Validate that an image's sampleCount matches the requirement for a specific API call
static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_NODE *image_node, VkSampleCountFlagBits sample_count,
                                            const char *location) {
    bool skip = false;
    if (image_node->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       reinterpret_cast<uint64_t &>(image_node->image), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
                       reinterpret_cast<uint64_t &>(image_node->image),
                       string_VkSampleCountFlagBits(image_node->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
    }
    return skip;
}

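// vkCmdBlitImage requires single-sample images on both ends; multisampled resolves go
// through vkCmdResolveImage instead. For example, an image created with
// VkImageCreateInfo::samples = VK_SAMPLE_COUNT_4_BIT used as either blit endpoint
// triggers DRAWSTATE_NUM_SAMPLES_MISMATCH via ValidateImageSampleCount() above.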
VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdBlitImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdBlitImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdBlitImage()"); };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
}

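// A buffer-to-image upload sketch (hypothetical handles and extents) showing the fields
// the per-region checks below care about -- imageSubresource for the layout check and
// bufferRowLength/bufferImageHeight/imageOffset/imageExtent for the granularity checks:
//
//     VkBufferImageCopy region = {};
//     region.bufferOffset = 0;
//     region.bufferRowLength = 0;   // 0 = tightly packed
//     region.bufferImageHeight = 0; // 0 = tightly packed
//     region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     region.imageExtent = {width, height, 1};
//     vkCmdCopyBufferToImage(cmd_buf, staging_buf, dst_image,
//                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);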
VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_buff_node && dst_img_node) {
        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyBufferToImage()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
                                                                                "vkCmdCopyBufferToImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_img_node && dst_buff_node) {
        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
        // Update bindings between buffer/image and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that SRC image & DST buffer have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_img_node, &pRegions[i], i,
                                                                                "vkCmdCopyImageToBuffer()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}

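// Note: the core spec also bounds vkCmdUpdateBuffer (dataSize must be a multiple of 4
// and at most 65536 bytes, dstOffset a multiple of 4); those limits are not checked
// here -- only memory binding, usage flags, and render pass state are validated below.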
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL
CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

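// A typical in-render-pass clear (hypothetical values) that the intercept below
// inspects -- colorAttachment must index into the active subpass's color attachments:
//
//     VkClearAttachment att = {};
//     att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//     att.colorAttachment = 0;
//     att.clearValue.color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     VkClearRect rect = {{{0, 0}, {width, height}}, /*baseArrayLayer*/ 0, /*layerCount*/ 1};
//     vkCmdClearAttachments(cmd_buf, 1, &att, 1, &rect);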
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times when an app legitimately needs vkCmdClearAttachments (generally when reusing
            // attachments within a render pass).
            // TODO: Make this warning more specific -- avoid triggering it when we can tell the app must
            // call vkCmdClearAttachments; until then it remains a performance warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer),
                                 __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                 "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                 (uint64_t)(commandBuffer));
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                if (attachment->colorAttachment >= pSD->colorAttachmentCount) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d; ignored",
                        attachment->colorAttachment, pCB->activeSubpass);
                } else if (pSD->pColorAttachments[attachment->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored",
                        attachment->colorAttachment);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // No DS attachment in active subpass
                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) { // DS attachment unused

                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearColorImage()");
        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearDepthStencilImage()");
        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}

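// Note: the spec additionally requires a multisampled srcImage and a VK_SAMPLE_COUNT_1_BIT
// dstImage for vkCmdResolveImage; the intercept below does not yet check sample counts,
// only memory binding, command buffer bindings, and render pass state.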
VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdResolveImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdResolveImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdResolveImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
}

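// Event stage masks cannot be fully validated at record time -- the value seen by a
// vkCmdWaitEvents may be produced by another command buffer submitted to the same queue.
// The helpers below therefore run at queue-submit time: each recorded vkCmdSetEvent /
// vkCmdResetEvent pushes a std::bind'd callback onto pCB->eventUpdates, and the callbacks
// replay against the per-queue eventToStageMap when the command buffer is submitted.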
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()");
        auto event_node = getEventNode(dev_data, event);
        if (event_node) {
            addCommandBufferBinding(&event_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_node->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()");
        auto event_node = getEventNode(dev_data, event);
        if (event_node) {
            addCommandBufferBinding(&event_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_node->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}

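// Image layouts are tracked per (image, mip level, array layer). A barrier such as
// (hypothetical handles):
//
//     VkImageMemoryBarrier b = { ... };
//     b.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     b.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//
// updates every subresource in b.subresourceRange in the loop below, and a mismatch
// between b.oldLayout and the tracked layout is reported as DRAWSTATE_INVALID_IMAGE_LAYOUT.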
static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}

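// For example, string_VkAccessFlags(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT)
// yields "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]" (bits are emitted in
// ascending order), and string_VkAccessFlags(0) yields "[None]".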
// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}

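// A barrier sketch (hypothetical image handle) that passes the access-mask and
// queue-family checks below for a transfer-write -> shader-read transition on an
// exclusive-mode image:
//
//     VkImageMemoryBarrier b = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     b.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // required for TRANSFER_DST_OPTIMAL
//     b.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;     // allowed for SHADER_READ_ONLY_OPTIMAL
//     b.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     b.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     b.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     b.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     b.image = image;
//     b.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};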
static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = getImageNode(dev_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than the " PRINTF_SIZE_T_SPECIFIER
                                         " queueFamilies created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
                skip_call |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
                skip_call |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            }
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
            }
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data) {
                format = image_data->createInfo.format;
                arrayLayers = image_data->createInfo.arrayLayers;
                mipLevels = image_data->createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
                if (imageswap_data) {
                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
                    if (swapchain_data) {
                        format = swapchain_data->createInfo.imageFormat;
                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
                if (vk_format_is_depth_or_stencil(format)) {
                    if (vk_format_is_depth_and_stencil(format)) {
                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                                 "%s: Image is a depth and stencil format and thus must "
                                                 "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                                 funcName);
                        }
                    } else if (vk_format_is_depth_only(format)) {
                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                                 "%s: Image is a depth-only format and thus must "
                                                 "have VK_IMAGE_ASPECT_DEPTH_BIT set.",
                                                 funcName);
                        }
                    } else { // stencil-only case
                        if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                                 "%s: Image is a stencil-only format and thus must "
                                                 "have VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                                 funcName);
                        }
                    }
                } else { // image is a color format
                    if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                             0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                             "%s: Image is a color format and thus must "
                                             "have VK_IMAGE_ASPECT_COLOR_BIT set.",
                                             funcName);
                    }
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: The sum of baseArrayLayer (%d) and layerCount (%d) must be less "
                                         "than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: The sum of baseMipLevel (%d) and levelCount (%d) must be less than "
                                         "or equal to the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
        if (buffer_node) {
            auto buffer_size = buffer_node->memSize;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip_call;
}

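// Worked example: if a command buffer waits on two events set with
// VK_PIPELINE_STAGE_TRANSFER_BIT and VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, then the
// srcStageMask passed to vkCmdWaitEvents must be exactly the OR of those two masks
// (optionally plus VK_PIPELINE_STAGE_HOST_BIT for host-set events); anything else is
// flagged by the check below.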
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = getEventNode(dev_data, event);
            if (!global_event_data) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
                                                            "using srcStageMask 0x%X, which must be the bitwise "
                                                            "OR of the stageMask parameters used in calls to "
                                                            "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
                                                            "the event was set with vkSetEvent), but the OR of "
                                                            "those masks is 0x%X.",
                             sourceStageMask, stageMask);
    }
    return skip_call;
}

9074VKAPI_ATTR void VKAPI_CALL
9075CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
9076              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9077              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9078              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9079    bool skip_call = false;
9080    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9081    std::unique_lock<std::mutex> lock(global_lock);
9082    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9083    if (pCB) {
9084        auto firstEventIndex = pCB->events.size();
9085        for (uint32_t i = 0; i < eventCount; ++i) {
9086            auto event_node = getEventNode(dev_data, pEvents[i]);
9087            if (event_node) {
9088                addCommandBufferBinding(&event_node->cb_bindings,
9089                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
9090                                        pCB);
9091                event_node->cb_bindings.insert(pCB);
9092            }
9093            pCB->waitedEvents.insert(pEvents[i]);
9094            pCB->events.push_back(pEvents[i]);
9095        }
9096        std::function<bool(VkQueue)> eventUpdate =
9097            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
9098        pCB->eventUpdates.push_back(eventUpdate);
9099        if (pCB->state == CB_RECORDING) {
9100            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
9101        } else {
9102            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
9103        }
9104        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9105        skip_call |=
9106            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9107                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9108    }
9109    lock.unlock();
9110    if (!skip_call)
9111        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
9112                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9113                                               imageMemoryBarrierCount, pImageMemoryBarriers);
9114}
9115
9116VKAPI_ATTR void VKAPI_CALL
9117CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
9118                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9119                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9120                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9121    bool skip_call = false;
9122    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9123    std::unique_lock<std::mutex> lock(global_lock);
9124    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9125    if (pCB) {
9126        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
9127        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9128        skip_call |=
9129            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9130                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9131    }
9132    lock.unlock();
9133    if (!skip_call)
9134        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
9135                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9136                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
9137}
9138
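// Record the availability state of the given query in both the command buffer's queryToStateMap and,
// if the queue is known, the queue's queryToStateMap. Always returns false (never requests a skip).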
9139bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
9140    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9141    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9142    if (pCB) {
9143        pCB->queryToStateMap[object] = value;
9144    }
9145    auto queue_data = dev_data->queueMap.find(queue);
9146    if (queue_data != dev_data->queueMap.end()) {
9147        queue_data->second.queryToStateMap[object] = value;
9148    }
9149    return false;
9150}
9151
9152VKAPI_ATTR void VKAPI_CALL
9153CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9154    bool skip_call = false;
9155    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9156    std::unique_lock<std::mutex> lock(global_lock);
9157    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9158    if (pCB) {
9159        QueryObject query = {queryPool, slot};
9160        pCB->activeQueries.insert(query);
9161        if (!pCB->startedQueries.count(query)) {
9162            pCB->startedQueries.insert(query);
9163        }
9164        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9165        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9166                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9167    }
9168    lock.unlock();
9169    if (!skip_call)
9170        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9171}
9172
9173VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9174    bool skip_call = false;
9175    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9176    std::unique_lock<std::mutex> lock(global_lock);
9177    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9178    if (pCB) {
9179        QueryObject query = {queryPool, slot};
9180        if (!pCB->activeQueries.count(query)) {
9181            skip_call |=
9182                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9183                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
9184                        (uint64_t)(queryPool), slot);
9185        } else {
9186            pCB->activeQueries.erase(query);
9187        }
9188        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9189        pCB->queryUpdates.push_back(queryUpdate);
9190        if (pCB->state == CB_RECORDING) {
9191            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9192        } else {
9193            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9194        }
9195        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9196                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9197    }
9198    lock.unlock();
9199    if (!skip_call)
9200        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9201}
9202
9203VKAPI_ATTR void VKAPI_CALL
9204CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
9205    bool skip_call = false;
9206    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9207    std::unique_lock<std::mutex> lock(global_lock);
9208    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9209    if (pCB) {
9210        for (uint32_t i = 0; i < queryCount; i++) {
9211            QueryObject query = {queryPool, firstQuery + i};
9212            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9213            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9214            pCB->queryUpdates.push_back(queryUpdate);
9215        }
9216        if (pCB->state == CB_RECORDING) {
9217            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9218        } else {
9219            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9220        }
9221        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
9222        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9223                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9224    }
9225    lock.unlock();
9226    if (!skip_call)
9227        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9228}
9229
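// Verify that every query in [firstQuery, firstQuery + queryCount) is available on the given queue,
// consulting the per-queue query state first and falling back to the device-wide state.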
9230bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9231    bool skip_call = false;
9232    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9233    auto queue_data = dev_data->queueMap.find(queue);
9234    if (queue_data == dev_data->queueMap.end())
9235        return false;
9236    for (uint32_t i = 0; i < queryCount; i++) {
9237        QueryObject query = {queryPool, firstQuery + i};
9238        auto query_data = queue_data->second.queryToStateMap.find(query);
9239        bool fail = false;
9240        if (query_data != queue_data->second.queryToStateMap.end()) {
9241            if (!query_data->second) {
9242                fail = true;
9243            }
9244        } else {
9245            auto global_query_data = dev_data->queryToStateMap.find(query);
9246            if (global_query_data != dev_data->queryToStateMap.end()) {
9247                if (!global_query_data->second) {
9248                    fail = true;
9249                }
9250            } else {
9251                fail = true;
9252            }
9253        }
9254        if (fail) {
9255            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9256                                 DRAWSTATE_INVALID_QUERY, "DS",
9257                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9258                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9259        }
9260    }
9261    return skip_call;
9262}
9263
9264VKAPI_ATTR void VKAPI_CALL
9265CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9266                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9267    bool skip_call = false;
9268    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9269    std::unique_lock<std::mutex> lock(global_lock);
9270
9271    auto cb_node = getCBNode(dev_data, commandBuffer);
9272    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
9273    if (cb_node && dst_buff_node) {
9274        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
9275        // Update bindings between buffer and cmd buffer
9276        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
9277        // Validate that DST buffer has correct usage flags set
9278        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
9279                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9280        std::function<bool()> function = [=]() {
9281            SetBufferMemoryValid(dev_data, dst_buff_node, true);
9282            return false;
9283        };
9284        cb_node->validate_functions.push_back(function);
9285        std::function<bool(VkQueue)> queryUpdate =
9286            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9287        cb_node->queryUpdates.push_back(queryUpdate);
9288        if (cb_node->state == CB_RECORDING) {
9289            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9290        } else {
9291            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9292        }
9293        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
9294        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9295                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9296    } else {
9297        assert(0);
9298    }
9299    lock.unlock();
9300    if (!skip_call)
9301        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9302                                                         stride, flags);
9303}
9304
9305VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9306                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9307                                            const void *pValues) {
9308    bool skip_call = false;
9309    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9310    std::unique_lock<std::mutex> lock(global_lock);
9311    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9312    if (pCB) {
9313        if (pCB->state == CB_RECORDING) {
9314            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9315        } else {
9316            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9317        }
9318    }
9319    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9320    if (0 == stageFlags) {
9321        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9322                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
9323    }
9324
9325    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9326    auto pipeline_layout = getPipelineStateLayout(dev_data, layout);
9327    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9328    // contained in the pipeline ranges.
9329    // Build a {start, end} span list for ranges with matching stage flags.
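    // Worked example (hypothetical values): ranges {offset = 0, size = 16} and {offset = 8, size = 24}
    // with matching stageFlags become spans [0, 16) and [8, 32), which the loop below coalesces into
    // the single span [0, 32).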
9330    const auto &ranges = pipeline_layout->push_constant_ranges;
9331    struct span {
9332        uint32_t start;
9333        uint32_t end;
9334    };
9335    std::vector<span> spans;
9336    spans.reserve(ranges.size());
9337    for (const auto &iter : ranges) {
9338        if (iter.stageFlags == stageFlags) {
9339            spans.push_back({iter.offset, iter.offset + iter.size});
9340        }
9341    }
9342    if (spans.size() == 0) {
9343        // There were no ranges that matched the stageFlags.
9344        skip_call |=
9345            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9346                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9347                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
9348                    (uint32_t)stageFlags, (uint64_t)layout);
9349    } else {
9350        // Sort span list by start value.
9351        struct comparer {
9352            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9353        } my_comparer;
9354        std::sort(spans.begin(), spans.end(), my_comparer);
9355
9356        // Examine two spans at a time.
9357        std::vector<span>::iterator current = spans.begin();
9358        std::vector<span>::iterator next = current + 1;
9359        while (next != spans.end()) {
9360            if (current->end < next->start) {
9361                // There is a gap; cannot coalesce. Move to the next two spans.
9362                ++current;
9363                ++next;
9364            } else {
9365                // Coalesce the two spans.  The start of the next span
9366                // is within the current span, so pick the larger of
9367                // the end values to extend the current span.
9368                // Then delete the next span and set next to the span after it.
9369                current->end = max(current->end, next->end);
9370                next = spans.erase(next);
9371            }
9372        }
9373
9374        // Now we can check if the incoming range is within any of the spans.
9375        bool contained_in_a_range = false;
9376        for (uint32_t i = 0; i < spans.size(); ++i) {
9377            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9378                contained_in_a_range = true;
9379                break;
9380            }
9381        }
9382        if (!contained_in_a_range) {
9383            skip_call |=
9384                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9385                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9386                                                              "with stageFlags = 0x%" PRIx32 " "
9387                                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
9388                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
9389        }
9390    }
9391    lock.unlock();
9392    if (!skip_call)
9393        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9394}
9395
9396VKAPI_ATTR void VKAPI_CALL
9397CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9398    bool skip_call = false;
9399    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9400    std::unique_lock<std::mutex> lock(global_lock);
9401    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9402    if (pCB) {
9403        QueryObject query = {queryPool, slot};
9404        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9405        pCB->queryUpdates.push_back(queryUpdate);
9406        if (pCB->state == CB_RECORDING) {
9407            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9408        } else {
9409            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9410        }
9411    }
9412    lock.unlock();
9413    if (!skip_call)
9414        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9415}
9416
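// For each used (non-UNUSED) attachment reference, verify that the framebuffer attachment it points at
// is backed by an image created with the required usage_flag.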
9417static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9418                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
9419    bool skip_call = false;
9420
9421    for (uint32_t attach = 0; attach < count; attach++) {
9422        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9423            // Attachment counts are verified elsewhere, but prevent an invalid access
9424            if (attachments[attach].attachment < fbci->attachmentCount) {
9425                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9426                auto view_state = getImageViewState(dev_data, *image_view);
9427                if (view_state) {
9428                    auto image_node = getImageNode(dev_data, view_state->create_info.image);
9429                    if (image_node != nullptr) {  // a null image node must not be dereferenced
9430                        if ((image_node->createInfo.usage & usage_flag) == 0) {
9431                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9432                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
9433                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9434                                                 "IMAGE_USAGE flags (%s).",
9435                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
9436                        }
9437                    }
9438                }
9439            }
9440        }
9441    }
9442    return skip_call;
9443}
9444
9445// Validate VkFramebufferCreateInfo which includes:
9446// 1. attachmentCount equals renderPass attachmentCount
9447// 2. corresponding framebuffer and renderpass attachments have matching formats
9448// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9449// 4. fb attachments only have a single mip level
9450// 5. fb attachment dimensions are each at least as large as the fb
9451// 6. fb attachments use identity swizzle
9452// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9453// 8. fb dimensions are within physical device limits
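// For illustration only (hypothetical handles and dimensions), a VkFramebufferCreateInfo that satisfies
// checks 1-8 for a single-attachment render pass might look like:
//     VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0,
//                                     render_pass, 1, &color_view, width, height, 1};
// where color_view is a single-mip, identity-swizzle view whose image was created with
// VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT and dimensions of at least width x height.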
9454static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9455    bool skip_call = false;
9456
9457    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
9458    if (rp_node) {
9459        const VkRenderPassCreateInfo *rpci = rp_node->createInfo.ptr();
9460        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9461            skip_call |= log_msg(
9462                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9463                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9464                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9465                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
9466                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9467        } else {
9468            // attachmentCounts match, so make sure corresponding attachment details line up
9469            const VkImageView *image_views = pCreateInfo->pAttachments;
9470            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9471                auto view_state = getImageViewState(dev_data, image_views[i]);
9472                auto &ivci = view_state->create_info;
9473                if (ivci.format != rpci->pAttachments[i].format) {
9474                    skip_call |= log_msg(
9475                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9476                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9477                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9478                              "the format of "
9479                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9480                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9481                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9482                }
9483                const VkImageCreateInfo *ici = &getImageNode(dev_data, ivci.image)->createInfo;
9484                if (ici->samples != rpci->pAttachments[i].samples) {
9485                    skip_call |= log_msg(
9486                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9487                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9488                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9489                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9490                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9491                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9492                }
9493                // Verify that view only has a single mip level
9494                if (ivci.subresourceRange.levelCount != 1) {
9495                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9496                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9497                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
9498                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
9499                                         i, ivci.subresourceRange.levelCount);
9500                }
9501                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9502                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9503                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9504                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9505                    (mip_height < pCreateInfo->height)) {
9506                    skip_call |=
9507                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9508                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9509                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9510                                "than the corresponding "
9511                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9512                                "dimensions for "
9513                                "attachment #%u, framebuffer:\n"
9514                                "width: %u, %u\n"
9515                                "height: %u, %u\n"
9516                                "layerCount: %u, %u\n",
9517                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9518                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9519                }
9520                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9521                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9522                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9523                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9524                    skip_call |= log_msg(
9525                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9526                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9527                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
9528                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9529                        "r swizzle = %s\n"
9530                        "g swizzle = %s\n"
9531                        "b swizzle = %s\n"
9532                        "a swizzle = %s\n",
9533                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9534                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
9535                }
9536            }
9537        }
9538        // Verify correct attachment usage flags
9539        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9540            // Verify input attachments:
9541            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
9542                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
9543            // Verify color attachments:
9544            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
9545                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
9546            // Verify depth/stencil attachments:
9547            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9548                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9549                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
9550            }
9551        }
9552    } else {
9553        skip_call |=
9554            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9555                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9556                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
9557                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9558    }
9559    // Verify FB dimensions are within physical device limits
9560    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
9561        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
9562        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
9563        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9564                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9565                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
9566                             "Here are the respective dimensions: requested, device max:\n"
9567                             "width: %u, %u\n"
9568                             "height: %u, %u\n"
9569                             "layerCount: %u, %u\n",
9570                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9571                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9572                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
9573    }
9574    return skip_call;
9575}
9576
9577// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
9578//  Return true if an error was encountered and the callback requests skipping the call down the chain;
9579//   false indicates that the call down the chain should proceed
9580static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9581    // TODO : Verify that the renderPass this FB is created with is compatible with the FB
9582    bool skip_call = false;
9583    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9584    return skip_call;
9585}
9586
9587// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9588static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9589    // Shadow create info and store in map
9590    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
9591        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
9592
9593    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9594        VkImageView view = pCreateInfo->pAttachments[i];
9595        auto view_state = getImageViewState(dev_data, view);
9596        if (!view_state) {
9597            continue;
9598        }
9599        MT_FB_ATTACHMENT_INFO fb_info;
9600        fb_info.mem = getImageNode(dev_data, view_state->create_info.image)->mem;
9601        fb_info.view_state = view_state;
9602        fb_info.image = view_state->create_info.image;
9603        fb_state->attachments.push_back(fb_info);
9604    }
9605    dev_data->frameBufferMap[fb] = std::move(fb_state);
9606}
9607
9608VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9609                                                 const VkAllocationCallbacks *pAllocator,
9610                                                 VkFramebuffer *pFramebuffer) {
9611    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9612    std::unique_lock<std::mutex> lock(global_lock);
9613    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9614    lock.unlock();
9615
9616    if (skip_call)
9617        return VK_ERROR_VALIDATION_FAILED_EXT;
9618
9619    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9620
9621    if (VK_SUCCESS == result) {
9622        lock.lock();
9623        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9624        lock.unlock();
9625    }
9626    return result;
9627}
9628
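// Depth-first search backward from subpass 'index' along prev edges, returning true if subpass
// 'dependent' is reachable, i.e. a (possibly transitive) dependency already orders the two subpasses.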
9629static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9630                           std::unordered_set<uint32_t> &processed_nodes) {
9631    // If we have already checked this node we have not found a dependency path so return false.
9632    if (processed_nodes.count(index))
9633        return false;
9634    processed_nodes.insert(index);
9635    const DAGNode &node = subpass_to_node[index];
9636    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
9637    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9638        for (auto elem : node.prev) {
9639            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9640                return true;
9641        }
9642    } else {
9643        return true;
9644    }
9645    return false;
9646}
9647
9648static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9649                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9650    bool result = true;
9651    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9652    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9653        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9654            continue;
9655        const DAGNode &node = subpass_to_node[subpass];
9656        // Check for a specified dependency between the two nodes. If one exists we are done.
9657        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9658        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9659        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9660            // If no direct dependency exists, an implicit one still might; if neither does, report an error.
9661            std::unordered_set<uint32_t> processed_nodes;
9662            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9663                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9664                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9665                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9666                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9667                                     dependent_subpasses[k]);
9668                result = false;
9669            }
9670        }
9671    }
9672    return result;
9673}
9674
9675static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9676                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9677    const DAGNode &node = subpass_to_node[index];
9678    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9679    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9680    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9681        if (attachment == subpass.pColorAttachments[j].attachment)
9682            return true;
9683    }
9684    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9685        if (attachment == subpass.pDepthStencilAttachment->attachment)
9686            return true;
9687    }
9688    bool result = false;
9689    // Loop through previous nodes and see if any of them write to the attachment.
9690    for (auto elem : node.prev) {
9691        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9692    }
9693    // If the attachment was written to by a previous node, then this node needs to preserve it.
9694    if (result && depth > 0) {
9696        bool has_preserved = false;
9697        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9698            if (subpass.pPreserveAttachments[j] == attachment) {
9699                has_preserved = true;
9700                break;
9701            }
9702        }
9703        if (!has_preserved) {
9704            skip_call |=
9705                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9706                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9707                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9708        }
9709    }
9710    return result;
9711}
9712
9713template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9714    // Half-open ranges [offset, offset + size) overlap iff each range begins before the other ends
9715    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
9716}
9717
9718bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9719    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9720            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9721}
9722
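// For the given framebuffer/renderpass pair, determine which attachments alias the same memory, then
// verify that every pair of subpasses sharing an attachment is ordered by an explicit or transitive
// dependency and that read-after-write attachments are preserved where required.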
9723static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
9724                                 RENDER_PASS_NODE const *renderPass) {
9725    bool skip_call = false;
9726    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
9727    auto const pCreateInfo = renderPass->createInfo.ptr();
9728    auto const & subpass_to_node = renderPass->subpassToNode;
9729    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9730    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9731    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9732    // Find overlapping attachments
9733    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9734        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9735            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9736            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9737            if (viewi == viewj) {
9738                overlapping_attachments[i].push_back(j);
9739                overlapping_attachments[j].push_back(i);
9740                continue;
9741            }
9742            auto view_state_i = getImageViewState(dev_data, viewi);
9743            auto view_state_j = getImageViewState(dev_data, viewj);
9744            if (!view_state_i || !view_state_j) {
9745                continue;
9746            }
9747            auto view_ci_i = view_state_i->create_info;
9748            auto view_ci_j = view_state_j->create_info;
9749            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9750                overlapping_attachments[i].push_back(j);
9751                overlapping_attachments[j].push_back(i);
9752                continue;
9753            }
9754            auto image_data_i = getImageNode(dev_data, view_ci_i.image);
9755            auto image_data_j = getImageNode(dev_data, view_ci_j.image);
9756            if (!image_data_i || !image_data_j) {
9757                continue;
9758            }
9759            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
9760                                                                             image_data_j->memOffset, image_data_j->memSize)) {
9761                overlapping_attachments[i].push_back(j);
9762                overlapping_attachments[j].push_back(i);
9763            }
9764        }
9765    }
9766    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9767        uint32_t attachment = i;
9768        for (auto other_attachment : overlapping_attachments[i]) {
9769            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9770                skip_call |=
9771                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9772                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9773                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9774                            attachment, other_attachment);
9775            }
9776            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9777                skip_call |=
9778                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9779                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9780                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9781                            other_attachment, attachment);
9782            }
9783        }
9784    }
9785    // For each attachment, find the subpasses that use it.
9786    unordered_set<uint32_t> attachmentIndices;
9787    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9788        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9789        attachmentIndices.clear();
9790        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9791            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9792            if (attachment == VK_ATTACHMENT_UNUSED)
9793                continue;
9794            input_attachment_to_subpass[attachment].push_back(i);
9795            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9796                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9797            }
9798        }
9799        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9800            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9801            if (attachment == VK_ATTACHMENT_UNUSED)
9802                continue;
9803            output_attachment_to_subpass[attachment].push_back(i);
9804            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9805                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9806            }
9807            attachmentIndices.insert(attachment);
9808        }
9809        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9810            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9811            output_attachment_to_subpass[attachment].push_back(i);
9812            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9813                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9814            }
9815
9816            if (attachmentIndices.count(attachment)) {
9817                skip_call |=
9818                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9819                            DRAWSTATE_INVALID_RENDERPASS, "DS",
9820                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
9821            }
9822        }
9823    }
9824    // If a dependency is needed, make sure one exists
9825    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9826        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9827        // If the attachment is an input then all subpasses that output must have a dependency relationship
9828        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9829            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9830            if (attachment == VK_ATTACHMENT_UNUSED)
9831                continue;
9832            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9833        }
9834        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9835        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9836            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9837            if (attachment == VK_ATTACHMENT_UNUSED)
9838                continue;
9839            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9840            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9841        }
9842        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9843            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9844            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9845            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9846        }
9847    }
9848    // Check implicit dependencies: if a subpass reads an attachment, that attachment must be preserved
9849    // by every subpass between the one that wrote it and the reader.
9850    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9851        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9852        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9853            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9854        }
9855    }
9856    return skip_call;
9857}
9858// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
9859// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that
9860// READ_ONLY layout attachments don't have CLEAR as their loadOp.
9861static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9862                                                  const uint32_t attachment,
9863                                                  const VkAttachmentDescription &attachment_description) {
9864    bool skip_call = false;
9865    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9866    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9867        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9868            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9869            skip_call |=
9870                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9871                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9872                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
9873        }
9874    }
9875    return skip_call;
9876}
9877
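// Check the layout specified for each subpass attachment reference: error on layouts that are invalid
// for that attachment type, warn when GENERAL is likely suboptimal, and validate the first use of each
// attachment against its VkAttachmentDescription (e.g. loadOp CLEAR on a READ_ONLY first layout).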
9878static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9879    bool skip = false;
9880
9881    // Track when we're observing the first use of an attachment
9882    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9883    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9884        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9885        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9886            auto attach_index = subpass.pColorAttachments[j].attachment;
9887            if (attach_index == VK_ATTACHMENT_UNUSED)
9888                continue;
9889
9890            switch (subpass.pColorAttachments[j].layout) {
9891            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9892                /* This is ideal. */
9893                break;
9894
9895            case VK_IMAGE_LAYOUT_GENERAL:
9896                /* May not be optimal; TODO: reconsider this warning based on
9897                 * other constraints?
9898                 */
9899                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9900                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9901                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9902                break;
9903
9904            default:
9905                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9906                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9907                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9908                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
9909            }
9910
9911            if (attach_first_use[attach_index]) {
9912                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
9913                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9914            }
9915            attach_first_use[attach_index] = false;
9916        }
9917        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9918            switch (subpass.pDepthStencilAttachment->layout) {
9919            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9920            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9921                /* These are ideal. */
9922                break;
9923
9924            case VK_IMAGE_LAYOUT_GENERAL:
9925                /* May not be optimal; TODO: reconsider this warning based on
9926                 * other constraints? GENERAL can be better than doing a bunch
9927                 * of transitions.
9928                 */
9929                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9930                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9931                                "GENERAL layout for depth attachment may not give optimal performance.");
9932                break;
9933
9934            default:
9935                /* No other layouts are acceptable */
9936                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9937                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9938                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9939                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9940                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9941            }
9942
9943            auto attach_index = subpass.pDepthStencilAttachment->attachment;
9944            if (attach_first_use[attach_index]) {
9945                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
9946                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9947            }
9948            attach_first_use[attach_index] = false;
9949        }
9950        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9951            auto attach_index = subpass.pInputAttachments[j].attachment;
9952            if (attach_index == VK_ATTACHMENT_UNUSED)
9953                continue;
9954
9955            switch (subpass.pInputAttachments[j].layout) {
9956            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9957            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9958                /* These are ideal. */
9959                break;
9960
9961            case VK_IMAGE_LAYOUT_GENERAL:
9962                /* May not be optimal. TODO: reconsider this warning based on
9963                 * other constraints.
9964                 */
9965                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9966                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9967                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9968                break;
9969
9970            default:
9971                /* No other layouts are acceptable */
9972                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9973                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9974                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9975                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
9976            }
9977
9978            if (attach_first_use[attach_index]) {
9979                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
9980                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9981            }
9982            attach_first_use[attach_index] = false;
9983        }
9984    }
9985    return skip;
9986}
9987
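// Build the subpass dependency DAG: record each explicit VkSubpassDependency as prev/next edges, flag
// self-dependencies, skip edges to/from VK_SUBPASS_EXTERNAL, and reject dependencies that would make
// an earlier subpass depend on a later one.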
9988static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9989                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9990    bool skip_call = false;
9991    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9992        DAGNode &subpass_node = subpass_to_node[i];
9993        subpass_node.pass = i;
9994    }
9995    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9996        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9997        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9998            if (dependency.srcSubpass == dependency.dstSubpass) {
9999                skip_call |=
10000                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10001                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
10002            }
10003
10004            // We don't want to add edges to the DAG for dependencies to/from
10005            // VK_SUBPASS_EXTERNAL. We don't use them for anything, and their
10006            // presence complicates other code.
10007            continue;
10008        } else if (dependency.srcSubpass > dependency.dstSubpass) {
10009            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10010                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
10011                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
10012        } else if (dependency.srcSubpass == dependency.dstSubpass) {
10013            has_self_dependency[dependency.srcSubpass] = true;
10014        }
10015
10016        subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
10017        subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
10018    }
10019    return skip_call;
10020}
10021
10022
10023VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
10024                                                  const VkAllocationCallbacks *pAllocator,
10025                                                  VkShaderModule *pShaderModule) {
10026    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10027    bool skip_call = false;
10028
10029    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
10030    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
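    // Note: codeSize is in bytes, while spv_const_binary_t expects a count of 32-bit words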
10031    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
10032    spv_diagnostic diag = nullptr;
10033
10034    auto result = spvValidate(ctx, &binary, &diag);
10035    if (result != SPV_SUCCESS) {
10036        skip_call |=
10037            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
10038                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
10039                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
10040    }
10041
10042    spvDiagnosticDestroy(diag);
10043    spvContextDestroy(ctx);
10044
10045    if (skip_call)
10046        return VK_ERROR_VALIDATION_FAILED_EXT;
10047
10048    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
10049
10050    if (res == VK_SUCCESS) {
10051        std::lock_guard<std::mutex> lock(global_lock);
10052        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
10053    }
10054    return res;
10055}
10056
10057static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
10058    bool skip_call = false;
10059    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
10060        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10061                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
10062                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
10063                             type, attachment, attachment_count);
10064    }
10065    return skip_call;
10066}
10067
10068static bool IsPowerOfTwo(unsigned x) {
10069    return x && !(x & (x - 1));
10070}
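
// IsPowerOfTwo() is used below to catch mixed sample counts: VkSampleCountFlagBits values
// are single bits (e.g. VK_SAMPLE_COUNT_4_BIT == 0x4), so OR-ing every attachment's samples
// together yields a power of two only when they all agree, e.g. 0x4 | 0x4 == 0x4 is
// consistent but 0x1 | 0x4 == 0x5 is not.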
10071
10072static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
10073    bool skip_call = false;
10074    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10075        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10076        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
10077            skip_call |=
10078                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10079                        DRAWSTATE_INVALID_RENDERPASS, "DS",
10080                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
10081        }
10082        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
10083            uint32_t attachment = subpass.pPreserveAttachments[j];
10084            if (attachment == VK_ATTACHMENT_UNUSED) {
10085                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10086                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
10087                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
10088            } else {
10089                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
10090            }
10091        }
10092
10093        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
10094            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
10095            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
10096
10097        unsigned sample_count = 0;
10098
10099        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10100            uint32_t attachment;
10101            if (subpass.pResolveAttachments) {
10102                attachment = subpass.pResolveAttachments[j].attachment;
10103                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
10104
10105                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
10106                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
10107                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10108                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10109                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
10110                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
10111                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
10112                }
10113            }
10114            attachment = subpass.pColorAttachments[j].attachment;
10115            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
10116
10117            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10118                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10119
10120                if (subpass_performs_resolve &&
10121                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
10122                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10123                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10124                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
10125                                         "which has VK_SAMPLE_COUNT_1_BIT",
10126                                         i, attachment);
10127                }
10128            }
10129        }
10130
10131        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10132            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10133            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
10134
10135            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10136                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10137            }
10138        }
10139
10140        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10141            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10142            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
10143        }
10144
10145        if (sample_count && !IsPowerOfTwo(sample_count)) {
10146            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10147                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10148                                 "CreateRenderPass:  Subpass %u attempts to render to "
10149                                 "attachments with inconsistent sample counts",
10150                                 i);
10151        }
10152    }
10153    return skip_call;
10154}
10155
10156VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
10157                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
10158    bool skip_call = false;
10159    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10160
10161    std::unique_lock<std::mutex> lock(global_lock);
10162
10163    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
10164    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
10165    //       ValidateLayouts.
10166    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
10167    lock.unlock();
10168
10169    if (skip_call) {
10170        return VK_ERROR_VALIDATION_FAILED_EXT;
10171    }
10172
10173    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
10174
10175    if (VK_SUCCESS == result) {
10176        lock.lock();
10177
10178        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
10179        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
10180        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
10181
10182        auto render_pass = unique_ptr<RENDER_PASS_NODE>(new RENDER_PASS_NODE(pCreateInfo));
10183        render_pass->renderPass = *pRenderPass;
10184        render_pass->hasSelfDependency = has_self_dependency;
10185        render_pass->subpassToNode = subpass_to_node;
10186
10187        // TODO: Maybe fill list and then copy instead of locking
10188        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
10189        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
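        // Record, per attachment, whether its first use in the pass reads it (input
        // attachment) or writes it (color/depth), plus the layout at that first use. E.g. an
        // attachment first referenced as a color attachment gets first_read == false even if
        // a later subpass consumes it as an input attachment.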
10190        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10191            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10192            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10193                uint32_t attachment = subpass.pColorAttachments[j].attachment;
10194                if (!attachment_first_read.count(attachment)) {
10195                    attachment_first_read.insert(std::make_pair(attachment, false));
10196                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
10197                }
10198            }
10199            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10200                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10201                if (!attachment_first_read.count(attachment)) {
10202                    attachment_first_read.insert(std::make_pair(attachment, false));
10203                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
10204                }
10205            }
10206            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10207                uint32_t attachment = subpass.pInputAttachments[j].attachment;
10208                if (!attachment_first_read.count(attachment)) {
10209                    attachment_first_read.insert(std::make_pair(attachment, true));
10210                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10211                }
10212            }
10213        }
10214
10215        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
10216    }
10217    return result;
10218}
10219
10220static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10221    bool skip_call = false;
10222    auto const pRenderPassInfo = getRenderPass(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
10223    auto const & framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
10224    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
10225        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10226                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
10227                                                                 "with a different number of attachments than the render pass.");
10228    }
10229    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10230        const VkImageView &image_view = framebufferInfo.pAttachments[i];
10231        auto view_state = getImageViewState(dev_data, image_view);
10232        assert(view_state);
10233        const VkImage &image = view_state->create_info.image;
10234        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
10235        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
10236                                             pRenderPassInfo->pAttachments[i].initialLayout};
10237        // TODO: Do not iterate over every possibility - consolidate where possible
10238        for (uint32_t j = 0; j < subRange.levelCount; j++) {
10239            uint32_t level = subRange.baseMipLevel + j;
10240            for (uint32_t k = 0; k < subRange.layerCount; k++) {
10241                uint32_t layer = subRange.baseArrayLayer + k;
10242                VkImageSubresource sub = {subRange.aspectMask, level, layer};
10243                IMAGE_CMD_BUF_LAYOUT_NODE node;
10244                if (!FindLayout(pCB, image, sub, node)) {
10245                    SetLayout(pCB, image, sub, newNode);
10246                    continue;
10247                }
10248                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
10249                    newNode.layout != node.layout) {
10250                    skip_call |=
10251                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10252                                DRAWSTATE_INVALID_RENDERPASS, "DS",
10253                                "You cannot start a render pass using attachment %u "
10254                                "where the render pass initial layout is %s and the previous "
10255                                "known layout of the attachment is %s. The layouts must match, or "
10256                                "the render pass initial layout for the attachment must be "
10257                                "VK_IMAGE_LAYOUT_UNDEFINED",
10258                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
10259                }
10260            }
10261        }
10262    }
10263    return skip_call;
10264}
10265
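// The Transition*Layouts helpers below only update the layer's per-command-buffer layout
// tracking; no actual barrier is recorded. E.g. beginning a render pass whose first subpass
// uses an attachment in VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL simply retags that image
// view's subresources with the new layout.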
10266static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
10267                                          VkAttachmentReference ref) {
10268    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
10269        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
10270        SetLayout(dev_data, pCB, image_view, ref.layout);
10271    }
10272}
10273
10274static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
10275                                     const int subpass_index) {
10276    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10277    if (!renderPass)
10278        return;
10279
10280    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
10281    if (!framebuffer)
10282        return;
10283
10284    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
10285    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10286        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
10287    }
10288    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10289        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
10290    }
10291    if (subpass.pDepthStencilAttachment) {
10292        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
10293    }
10294}
10295
10296static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
10297    bool skip_call = false;
10298    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10299        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10300                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
10301                             cmd_name.c_str());
10302    }
10303    return skip_call;
10304}
10305
10306static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10307    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10308    if (!renderPass)
10309        return;
10310
10311    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
10312    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
10313    if (!framebuffer)
10314        return;
10315
10316    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10317        auto image_view = framebuffer->createInfo.pAttachments[i];
10318        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
10319    }
10320}
10321
10322static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10323    bool skip_call = false;
10324    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
10325        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
10326    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10327        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10328        pRenderPassBegin->renderArea.offset.y < 0 ||
10329        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10330        skip_call |= static_cast<bool>(log_msg(
10331            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10332            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10333            "Cannot execute a render pass with renderArea not within the bound of the "
10334            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10335            "height %d.",
10336            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10337            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10338    }
10339    return skip_call;
10340}
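
// Illustrative example of the bounds check above: against a 1024x768 framebuffer, a
// renderArea of {offset {0, 0}, extent {1024, 768}} passes, while {offset {512, 0},
// extent {1024, 768}} fails because 512 + 1024 exceeds the framebuffer width of 1024.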
10341
10342// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
10343// [load|store]Op flag must be checked
10344// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10345template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10346    if (color_depth_op != op && stencil_op != op) {
10347        return false;
10348    }
10349    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10350    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10351
10352    return ((check_color_depth_load_op && (color_depth_op == op)) ||
10353            (check_stencil_load_op && (stencil_op == op)));
10354}
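
// Illustrative example: for VK_FORMAT_D24_UNORM_S8_UINT both ops are consulted, so
// FormatSpecificLoadAndStoreOpSettings(fmt, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
// VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_LOAD_OP_CLEAR) returns true. For
// VK_FORMAT_S8_UINT only the stencil op is consulted, and for color formats only the
// color/depth op is.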
10355
10356VKAPI_ATTR void VKAPI_CALL
10357CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10358    bool skip_call = false;
10359    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10360    std::unique_lock<std::mutex> lock(global_lock);
10361    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10362    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
10363    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10364    if (cb_node) {
10365        if (renderPass) {
10366            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
10367            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
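            // Queue deferred memory-validity checks for each attachment: LOAD_OP_CLEAR marks
            // the backing image valid, LOAD_OP_DONT_CARE marks it invalid, and LOAD_OP_LOAD
            // (or any first-read use) requires it to already be valid. The lambdas are only
            // queued here; the layer runs them later when validating the buffer for submit.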
10368            for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
10369                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10370                auto pAttachment = &renderPass->createInfo.pAttachments[i];
10371                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10372                                                         pAttachment->stencilLoadOp,
10373                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10374                    clear_op_size = static_cast<uint32_t>(i) + 1;
10375                    std::function<bool()> function = [=]() {
10376                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10377                        return false;
10378                    };
10379                    cb_node->validate_functions.push_back(function);
10380                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10381                                                                pAttachment->stencilLoadOp,
10382                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10383                    std::function<bool()> function = [=]() {
10384                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10385                        return false;
10386                    };
10387                    cb_node->validate_functions.push_back(function);
10388                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10389                                                                pAttachment->stencilLoadOp,
10390                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
10391                    std::function<bool()> function = [=]() {
10392                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10393                                                          "vkCmdBeginRenderPass()");
10394                    };
10395                    cb_node->validate_functions.push_back(function);
10396                }
10397                if (renderPass->attachment_first_read[i]) {
10398                    std::function<bool()> function = [=]() {
10399                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10400                                                          "vkCmdBeginRenderPass()");
10401                    };
10402                    cb_node->validate_functions.push_back(function);
10403                }
10404            }
10405            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10406                skip_call |=
10407                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10408                            reinterpret_cast<uint64_t &>(renderPass->renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
10409                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
10410                            "must be at least %u entries in the pClearValues array to account for the highest-indexed "
10411                            "attachment in renderPass 0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is "
10412                            "attachment %u. Note that the pClearValues array is indexed by attachment number, so any "
10413                            "entries between 0 and %u that correspond to attachments which aren't cleared will simply "
10414                            "be ignored. %s",
10415                            pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass->renderPass),
10416                            clear_op_size - 1, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
10417            }
10418            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10419            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10420            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
10421            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10422            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
10423            skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10424            cb_node->activeRenderPass = renderPass;
10425            // This is a shallow copy as that is all that is needed for now
10426            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10427            cb_node->activeSubpass = 0;
10428            cb_node->activeSubpassContents = contents;
10429            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10430            // Connect this framebuffer and its children to this cmdBuffer
10431            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10432            // transition attachments to the correct layouts for the first subpass
10433            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10434        } else {
10435            skip_call |=
10436                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10437                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
10438        }
10439    }
10440    lock.unlock();
10441    if (!skip_call) {
10442        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10443    }
10444}
10445
10446VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10447    bool skip_call = false;
10448    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10449    std::unique_lock<std::mutex> lock(global_lock);
10450    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10451    if (pCB) {
10452        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
10453        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10454        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
10455
10456        auto subpassCount = pCB->activeRenderPass ? pCB->activeRenderPass->createInfo.subpassCount : 0; // null if outside render pass
10457        if (subpassCount && (pCB->activeSubpass == subpassCount - 1)) {
10458            skip_call |=
10459                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10460                        reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10461                        "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
10462        }
10463    }
10464    lock.unlock();
10465
10466    if (skip_call)
10467        return;
10468
10469    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
10470
10471    if (pCB) {
10472        lock.lock();
10473        pCB->activeSubpass++;
10474        pCB->activeSubpassContents = contents;
10475        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
10476    }
10477}
10478
10479VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10480    bool skip_call = false;
10481    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10482    std::unique_lock<std::mutex> lock(global_lock);
10483    auto pCB = getCBNode(dev_data, commandBuffer);
10484    if (pCB) {
10485        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
10486        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
10487        if (pRPNode) {
10488            if (pCB->activeSubpass != pRPNode->createInfo.subpassCount - 1) {
10489                skip_call |=
10490                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10491                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10492                            "vkCmdEndRenderPass(): Called before reaching final subpass");
10493            }
10494
10495            for (size_t i = 0; i < pRPNode->createInfo.attachmentCount; ++i) {
10496                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10497                auto pAttachment = &pRPNode->createInfo.pAttachments[i];
10498                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10499                                                         pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
10500                    std::function<bool()> function = [=]() {
10501                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10502                        return false;
10503                    };
10504                    pCB->validate_functions.push_back(function);
10505                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10506                                                                pAttachment->stencilStoreOp,
10507                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
10508                    std::function<bool()> function = [=]() {
10509                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10510                        return false;
10511                    };
10512                    pCB->validate_functions.push_back(function);
10513                }
10514            }
10515        }
10516        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
10517        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
10518        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10519    }
10520    lock.unlock();
10521
10522    if (skip_call)
10523        return;
10524
10525    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
10526
10527    if (pCB) {
10528        lock.lock();
10529        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
10530        pCB->activeRenderPass = nullptr;
10531        pCB->activeSubpass = 0;
10532        pCB->activeFramebuffer = VK_NULL_HANDLE;
10533    }
10534}
10535
10536static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10537                                        uint32_t secondaryAttach, const char *msg) {
10538    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10539                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10540                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
10541                   "that is not compatible with the current render pass of the Primary Cmd Buffer. "
10542                   "Attachment %u is not compatible with %u: %s",
10543                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
10544}
10545
10546static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10547                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10548                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10549                                            uint32_t secondaryAttach, bool is_multi) {
10550    bool skip_call = false;
10551    if (primaryPassCI->attachmentCount <= primaryAttach) {
10552        primaryAttach = VK_ATTACHMENT_UNUSED;
10553    }
10554    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10555        secondaryAttach = VK_ATTACHMENT_UNUSED;
10556    }
10557    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10558        return skip_call;
10559    }
10560    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10561        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10562                                                 "The first is unused while the second is not.");
10563        return skip_call;
10564    }
10565    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10566        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10567                                                 "The second is unused while the first is not.");
10568        return skip_call;
10569    }
10570    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10571        skip_call |=
10572            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10573    }
10574    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10575        skip_call |=
10576            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10577    }
10578    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10579        skip_call |=
10580            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10581    }
10582    return skip_call;
10583}
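
// Illustrative example: two render passes whose corresponding attachments share
// VK_FORMAT_R8G8B8A8_UNORM but differ in sample count trip only the "different samples"
// message above. Out-of-range indices are first normalized to VK_ATTACHMENT_UNUSED, so they
// compare as unused rather than indexing past pAttachments.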
10584
10585static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10586                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10587                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10588    bool skip_call = false;
10589    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10590    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10591    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10592    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10593        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10594        if (i < primary_desc.inputAttachmentCount) {
10595            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10596        }
10597        if (i < secondary_desc.inputAttachmentCount) {
10598            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10599        }
10600        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10601                                                     secondaryPassCI, secondary_input_attach, is_multi);
10602    }
10603    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10604    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10605        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10606        if (i < primary_desc.colorAttachmentCount) {
10607            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10608        }
10609        if (i < secondary_desc.colorAttachmentCount) {
10610            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10611        }
10612        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10613                                                     secondaryPassCI, secondary_color_attach, is_multi);
10614        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10615        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10616            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10617        }
10618        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10619            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10620        }
10621        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10622                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10623    }
10624    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10625    if (primary_desc.pDepthStencilAttachment) {
10626        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10627    }
10628    if (secondary_desc.pDepthStencilAttachment) {
10629        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10630    }
10631    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10632                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10633    return skip_call;
10634}
10635
10636// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10637//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10638//  will then feed into this function
10639static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10640                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10641                                            VkRenderPassCreateInfo const *secondaryPassCI) {
10642    bool skip_call = false;
10643
10644    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10645        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10646                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10647                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10648                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
10649                             " that has a subpassCount of %u.",
10650                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10651                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10652    } else {
10653        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10654            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10655                                                      primaryPassCI->subpassCount > 1);
10656        }
10657    }
10658    return skip_call;
10659}
10660
10661static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10662                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10663    bool skip_call = false;
10664    if (!pSubCB->beginInfo.pInheritanceInfo) {
10665        return skip_call;
10666    }
10667    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10668    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10669    if (secondary_fb != VK_NULL_HANDLE) {
10670        if (primary_fb != secondary_fb) {
10671            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10672                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10673                                 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10674                                 " which has a framebuffer 0x%" PRIx64
10675                                 " that is not the same as the primaryCB's current active framebuffer 0x%" PRIx64 ".",
10676                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10677                                 reinterpret_cast<uint64_t &>(primary_fb));
10678        }
10679        auto fb = getFramebufferState(dev_data, secondary_fb);
10680        if (!fb) {
10681            skip_call |=
10682                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10683                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10684                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
10685                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10686            return skip_call;
10687        }
10688        auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10689        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) { // guard unknown inherited renderPass
10690            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10691                                                         cb_renderpass->createInfo.ptr());
10692        }
10693    }
10694    return skip_call;
10695}
10696
10697static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10698    bool skip_call = false;
10699    unordered_set<int> activeTypes;
10700    for (auto queryObject : pCB->activeQueries) {
10701        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10702        if (queryPoolData != dev_data->queryPoolMap.end()) {
10703            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10704                pSubCB->beginInfo.pInheritanceInfo) {
10705                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10706                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10707                    skip_call |= log_msg(
10708                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10709                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10710                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10711                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried, so every "
10712                        "pipelineStatistics bit set on the secondary command buffer must also be set on the queryPool.",
10713                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10714                }
10715            }
10716            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10717        }
10718    }
10719    for (auto queryObject : pSubCB->startedQueries) {
10720        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10721        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10722            skip_call |=
10723                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10724                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10725                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10726                        "which has invalid active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
10727                        "secondary Cmd Buffer 0x%p.",
10728                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10729                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10730        }
10731    }
10732
10733    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10734    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10735    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10736        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10737                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10738                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10739                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10740                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10741                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10742    }
10743
10744    return skip_call;
10745}
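
// Illustrative example of the pipeline-statistics subset check above: if the primary CB has
// an active VK_QUERY_TYPE_PIPELINE_STATISTICS query on a pool created with only the
// clipping-invocations statistic, a secondary CB whose pInheritanceInfo->pipelineStatistics
// also requests fragment-shader invocations fails validation.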
10746
10747VKAPI_ATTR void VKAPI_CALL
10748CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10749    bool skip_call = false;
10750    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10751    std::unique_lock<std::mutex> lock(global_lock);
10752    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10753    if (pCB) {
10754        GLOBAL_CB_NODE *pSubCB = NULL;
10755        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10756            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10757            if (!pSubCB) {
10758                skip_call |=
10759                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10760                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10761                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10762                            (void *)pCommandBuffers[i], i);
10763            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10764                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10765                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10766                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10767                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
10768                                     (void *)pCommandBuffers[i], i);
10769            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10770                auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10771                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10772                    skip_call |= log_msg(
10773                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10774                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10775                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10776                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10777                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10778                } else {
10779                    // Make sure render pass is compatible with parent command buffer pass if has continue
10780                    if (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) {
10781                        skip_call |=
10782                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
10783                                                            pCommandBuffers[i], secondary_rp_node->createInfo.ptr());
10784                    }
10785                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10786                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10787                }
10788                string errorString = "";
10789                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10790                if ((pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) &&
10791                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
10792                                                     secondary_rp_node->createInfo.ptr(), errorString)) {
10793                    skip_call |= log_msg(
10794                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10795                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10796                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10797                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10798                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10799                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10800                }
10801            }
10802            // TODO(mlentine): Move more logic into this method
10803            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10804            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
10805            // Secondary cmdBuffers are considered pending execution from the moment
10806            // they are recorded
10807            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10808                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10809                    skip_call |= log_msg(
10810                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10811                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10812                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10813                        "set!",
10814                        (uint64_t)(pCB->commandBuffer));
10815                }
10816                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10817                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10818                    skip_call |= log_msg(
10819                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10820                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10821                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
10822                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10823                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10824                        "set, even though it does.",
10825                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10826                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10827                }
10828            }
10829            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
10830                skip_call |=
10831                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10832                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10833                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10834                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
10835                            "flight and inherited queries not "
10836                            "supported on this device.",
10837                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10838            }
10839            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10840            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10841            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10842            for (auto &function : pSubCB->queryUpdates) {
10843                pCB->queryUpdates.push_back(function);
10844            }
10845        }
10846        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10847        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10848    }
10849    lock.unlock();
10850    if (!skip_call)
10851        dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10852}
10853
10854// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10855static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10856                                    VkDeviceSize end_offset) {
10857    bool skip_call = false;
10858    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10859    // Iterate over all bound image ranges and verify that for any that overlap the
10860    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10861    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10862    for (auto image_handle : mem_info->bound_images) {
10863        auto img_it = mem_info->bound_ranges.find(image_handle);
10864        if (img_it != mem_info->bound_ranges.end()) {
10865            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10866                std::vector<VkImageLayout> layouts;
10867                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10868                    for (auto layout : layouts) {
10869                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10870                            skip_call |=
10871                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10872                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10873                                                                                        "GENERAL or PREINITIALIZED are supported.",
10874                                        string_VkImageLayout(layout));
10875                        }
10876                    }
10877                }
10878            }
10879        }
10880    }
10881    return skip_call;
10882}
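
// Illustrative sketch (not part of the layer): the app-side pattern the check above permits.
// A linear image written through a host mapping should remain in VK_IMAGE_LAYOUT_PREINITIALIZED
// (or GENERAL) while its backing memory is mapped:
//
//     VkImageCreateInfo ci = {};   // remaining fields assumed filled in elsewhere
//     ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
//     ci.tiling = VK_IMAGE_TILING_LINEAR;
//     ci.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
//     // ... create the image and bind it to host-visible memory ...
//     void *ptr = nullptr;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr);   // OK: layout still PREINITIALIZED
//
// Transitioning the image to, e.g., SHADER_READ_ONLY_OPTIMAL before mapping the same memory
// would trigger the DRAWSTATE_INVALID_IMAGE_LAYOUT error above.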
10883
10884VKAPI_ATTR VkResult VKAPI_CALL
10885MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10886    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10887
10888    bool skip_call = false;
10889    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10890    std::unique_lock<std::mutex> lock(global_lock);
10891    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10892    if (mem_info) {
10893        // TODO : This could be more fine-grained to track just the region that is valid
10894        mem_info->global_valid = true;
10895        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10896        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10897        // TODO : Do we need to create new "bound_range" for the mapped range?
10898        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10899        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10900             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10901            skip_call |=
10902                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10903                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10904                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10905        }
10906    }
10907    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10908    lock.unlock();
10909
10910    if (!skip_call) {
10911        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
10912        if (VK_SUCCESS == result) {
10913            lock.lock();
10914            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10915            storeMemRanges(dev_data, mem, offset, size);
10916            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10917            lock.unlock();
10918        }
10919    }
10920    return result;
10921}
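
// Illustrative sketch (not part of the layer): the HOST_VISIBLE check above flags memory
// allocated from a type lacking VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT. Apps avoid that by
// selecting a suitable type index before vkAllocateMemory; mem_reqs is assumed to come from
// a prior vkGetBufferMemoryRequirements/vkGetImageMemoryRequirements call:
//
//     VkPhysicalDeviceMemoryProperties props;
//     vkGetPhysicalDeviceMemoryProperties(physical_device, &props);
//     uint32_t type_index = UINT32_MAX;
//     for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
//         if ((mem_reqs.memoryTypeBits & (1u << i)) &&
//             (props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
//             type_index = i;   // first host-visible type compatible with mem_reqs
//             break;
//         }
//     }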
10922
10923VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10924    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10925    bool skip_call = false;
10926
10927    std::unique_lock<std::mutex> lock(global_lock);
10928    skip_call |= deleteMemRanges(dev_data, mem);
10929    lock.unlock();
10930    if (!skip_call) {
10931        dev_data->dispatch_table.UnmapMemory(device, mem);
10932    }
10933}
10934
10935static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
10936                                   const VkMappedMemoryRange *pMemRanges) {
10937    bool skip_call = false;
10938    for (uint32_t i = 0; i < memRangeCount; ++i) {
10939        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10940        if (mem_info) {
10941            if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10942                skip_call |=
10943                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10944                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10945                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10946                            "(" PRINTF_SIZE_T_SPECIFIER ").",
10947                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10948            }
10949
10950            const uint64_t dev_dataTerminus = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10951                                                  ? mem_info->alloc_info.allocationSize
10952                                                  : (mem_info->mem_range.offset + mem_info->mem_range.size);
10953            if (pMemRanges[i].size != VK_WHOLE_SIZE && (dev_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
10954                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10955                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10956                                     MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10957                                                                  ") exceeds the Memory Object's upper-bound "
10958                                                                  "(" PRINTF_SIZE_T_SPECIFIER ").",
10959                                     funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10960                                     static_cast<size_t>(dev_dataTerminus));
10961            }
10962        }
10963    }
10964    return skip_call;
10965}
10966
10967static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t memRangeCount,
10968                                                     const VkMappedMemoryRange *pMemRanges) {
10969    bool skip_call = false;
10970    for (uint32_t i = 0; i < memRangeCount; ++i) {
10971        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10972        if (mem_info) {
10973            if (mem_info->shadow_copy) {
10974                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10975                                        ? mem_info->mem_range.size
10976                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
10977                char *data = static_cast<char *>(mem_info->shadow_copy);
10978                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10979                    if (data[j] != NoncoherentMemoryFillValue) {
10980                        skip_call |= log_msg(
10981                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10982                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10983                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10984                    }
10985                }
10986                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10987                    if (data[j] != NoncoherentMemoryFillValue) {
10988                        skip_call |= log_msg(
10989                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10990                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10991                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10992                    }
10993                }
10994                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10995            }
10996        }
10997    }
10998    return skip_call;
10999}
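
// Shadow-copy layout assumed by the two loops above (a sketch; the pad size comes from
// mem_info->shadow_pad_size, established when the mapping was created):
//
//     shadow_copy:  [ pad ][ user-visible mapped bytes ][ pad ]
//
// Both pads are filled with NoncoherentMemoryFillValue; any other byte found there means the
// app wrote outside its mapped range, reported above as memory underflow/overflow.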
11000
11001static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t memory_range_count,
11002                                            const VkMappedMemoryRange *mem_ranges) {
11003    for (uint32_t i = 0; i < memory_range_count; ++i) {
11004        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11005        if (mem_info && mem_info->shadow_copy) {
11006            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11007                                    ? mem_info->mem_range.size
11008                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11009            char *data = static_cast<char *>(mem_info->shadow_copy);
11010            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
11011        }
11012    }
11013}
11014
11015VkResult VKAPI_CALL
11016FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
11017    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11018    bool skip_call = false;
11019    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11020
11021    std::unique_lock<std::mutex> lock(global_lock);
11022    skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, memRangeCount, pMemRanges);
11023    skip_call |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
11024    lock.unlock();
11025    if (!skip_call) {
11026        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
11027    }
11028    return result;
11029}
11030
11031VkResult VKAPI_CALL
11032InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
11033    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11034    bool skip_call = false;
11035    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11036
11037    std::unique_lock<std::mutex> lock(global_lock);
11038    skip_call |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
11039    lock.unlock();
11040    if (!skip_call) {
11041        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
11042        // Update our shadow copy with modified driver data
11043        CopyNoncoherentMemoryFromDriver(dev_data, memRangeCount, pMemRanges);
11044    }
11045    return result;
11046}
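
// Illustrative usage (not part of the layer): for memory types without
// VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, host writes must be flushed and device writes
// invalidated explicitly; nonCoherentAtomSize alignment is assumed handled by the caller:
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;
//     vkFlushMappedMemoryRanges(device, 1, &range);        // after host writes
//     vkInvalidateMappedMemoryRanges(device, 1, &range);   // before host reads of device writes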
11047
11048VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
11049    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11050    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11051    bool skip_call = false;
11052    std::unique_lock<std::mutex> lock(global_lock);
11053    auto image_node = getImageNode(dev_data, image);
11054    if (image_node) {
11055        // Track objects tied to memory
11056        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
11057        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
11058        VkMemoryRequirements memRequirements;
11059        lock.unlock();
11060        dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &memRequirements);
11061        lock.lock();
11062
11063        // Track and validate bound memory range information
11064        auto mem_info = getMemObjInfo(dev_data, mem);
11065        if (mem_info) {
11066            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
11067                                                image_node->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
11068            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
11069        }
11070
11071        print_mem_list(dev_data);
11072        lock.unlock();
11073        if (!skip_call) {
11074            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
11075            lock.lock();
11076            image_node->mem = mem;
11077            image_node->memOffset = memoryOffset;
11078            image_node->memSize = memRequirements.size;
11079            lock.unlock();
11080        }
11081    } else {
11082        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11083                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
11084                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been destroyed?",
11085                reinterpret_cast<const uint64_t &>(image));
11086    }
11087    return result;
11088}
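
// Illustrative sketch (not part of the layer): a valid bind satisfies the size and alignment
// reported by vkGetImageMemoryRequirements, which is also what InsertImageMemoryRange and
// ValidateMemoryTypes above check against. desired_offset is a hypothetical app value:
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     // round up to the required (power-of-two) alignment
//     VkDeviceSize offset = (desired_offset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     // the allocation must come from a type allowed by reqs.memoryTypeBits and must satisfy
//     // offset + reqs.size <= allocationSize
//     vkBindImageMemory(device, image, mem, offset);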
11089
11090VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
11091    bool skip_call = false;
11092    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11093    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11094    std::unique_lock<std::mutex> lock(global_lock);
11095    auto event_node = getEventNode(dev_data, event);
11096    if (event_node) {
11097        event_node->needsSignaled = false;
11098        event_node->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11099        if (event_node->write_in_use) {
11100            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11101                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11102                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
11103                                 reinterpret_cast<const uint64_t &>(event));
11104        }
11105    }
11106    lock.unlock();
11107    // Setting an event from the host is immediately visible to all queues, so update stageMask for any queue that has seen this event
11108    // TODO : For correctness this needs a separate fix to verify that the app doesn't make incorrect assumptions about the
11109    // ordering of this command relative to vkCmd[Set|Reset]Events (see GH297)
11110    for (auto queue_data : dev_data->queueMap) {
11111        auto event_entry = queue_data.second.eventToStageMap.find(event);
11112        if (event_entry != queue_data.second.eventToStageMap.end()) {
11113            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11114        }
11115    }
11116    if (!skip_call)
11117        result = dev_data->dispatch_table.SetEvent(device, event);
11118    return result;
11119}
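
// Illustrative sketch (not part of the layer): a host-signaled event paired with a recorded
// wait. The stageMask update above is what lets a wait like this pass validation:
//
//     vkCmdWaitEvents(cmd, 1, &event,
//                     VK_PIPELINE_STAGE_HOST_BIT,          // srcStageMask: the host signals
//                     VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,   // dstStageMask
//                     0, nullptr, 0, nullptr, 0, nullptr);
//     // ... submit cmd, then from the host:
//     vkSetEvent(device, event);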
11120
11121VKAPI_ATTR VkResult VKAPI_CALL
11122QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11123    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11124    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11125    bool skip_call = false;
11126    std::unique_lock<std::mutex> lock(global_lock);
11127    auto pFence = getFenceNode(dev_data, fence);
11128    auto pQueue = getQueueNode(dev_data, queue);
11129
11130    // First verify that fence is not in use
11131    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11132
11133    if (pFence) {
11134        SubmitFence(pQueue, pFence, bindInfoCount);
11135    }
11136
11137    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11138        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11139        // Track objects tied to memory
11140        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11141            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11142                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
11143                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11144                                           "vkQueueBindSparse"))
11145                    skip_call = true;
11146            }
11147        }
11148        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11149            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11150                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
11151                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11152                                           "vkQueueBindSparse"))
11153                    skip_call = true;
11154            }
11155        }
11156        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11157            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11158                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
11159                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11160                                           "vkQueueBindSparse"))
11161                    skip_call = true;
11162            }
11163        }
11164
11165        std::vector<SEMAPHORE_WAIT> semaphore_waits;
11166        std::vector<VkSemaphore> semaphore_signals;
11167        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11168            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11169            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11170            if (pSemaphore) {
11171                if (pSemaphore->signaled) {
11172                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11173                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11174                        pSemaphore->in_use.fetch_add(1);
11175                    }
11176                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11177                    pSemaphore->signaled = false;
11178                } else {
11179                    skip_call |=
11180                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11181                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11182                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
11183                                " that has no way to be signaled.",
11184                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11185                }
11186            }
11187        }
11188        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11189            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11190            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11191            if (pSemaphore) {
11192                if (pSemaphore->signaled) {
11193                    skip_call |=
11194                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11195                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11196                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
11197                                ", but that semaphore is already signaled.",
11198                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11199                }
11200                else {
11201                    pSemaphore->signaler.first = queue;
11202                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11203                    pSemaphore->signaled = true;
11204                    pSemaphore->in_use.fetch_add(1);
11205                    semaphore_signals.push_back(semaphore);
11206                }
11207            }
11208        }
11209
11210        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11211                                         semaphore_waits,
11212                                         semaphore_signals,
11213                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11214    }
11215
11216    if (pFence && !bindInfoCount) {
11217        // No work to do, just dropping a fence in the queue by itself.
11218        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11219                                         std::vector<SEMAPHORE_WAIT>(),
11220                                         std::vector<VkSemaphore>(),
11221                                         fence);
11222    }
11223
11224    print_mem_list(dev_data);
11225    lock.unlock();
11226
11227    if (!skip_call)
11228        return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11229
11230    return result;
11231}
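
// Illustrative sketch (not part of the layer): the smallest buffer bind this function tracks.
// One VkSparseMemoryBind backs the first `size` bytes of a sparse buffer; size is assumed to
// satisfy the buffer's sparse block granularity:
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = size;
//     bind.memory = mem;
//     bind.memoryOffset = 0;
//     VkSparseBufferMemoryBindInfo buf_bind = {sparse_buffer, 1, &bind};
//     VkBindSparseInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     info.bufferBindCount = 1;
//     info.pBufferBinds = &buf_bind;
//     vkQueueBindSparse(queue, 1, &info, fence);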
11232
11233VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11234                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11235    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11236    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11237    if (result == VK_SUCCESS) {
11238        std::lock_guard<std::mutex> lock(global_lock);
11239        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11240        sNode->signaler.first = VK_NULL_HANDLE;
11241        sNode->signaler.second = 0;
11242        sNode->signaled = false;
11243    }
11244    return result;
11245}
11246
11247VKAPI_ATTR VkResult VKAPI_CALL
11248CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11249    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11250    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11251    if (result == VK_SUCCESS) {
11252        std::lock_guard<std::mutex> lock(global_lock);
11253        dev_data->eventMap[*pEvent].needsSignaled = false;
11254        dev_data->eventMap[*pEvent].write_in_use = 0;
11255        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11256    }
11257    return result;
11258}
11259
11260static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, VkSwapchainCreateInfoKHR const *pCreateInfo,
11261                                              SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) {
11262    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11263
11264    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11265        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11266                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11267                    "vkCreateSwapchainKHR(): surface has an existing swapchain other than oldSwapchain"))
11268            return true;
11269    }
11270    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11271        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11272                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11273                    "DS", "vkCreateSwapchainKHR(): pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface"))
11274            return true;
11275    }
11276
11277    return false;
11278}
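
// Illustrative sketch (not part of the layer): the resize/recreate pattern the check above
// permits. Passing the live swapchain as oldSwapchain is the only way to create a second
// swapchain on a surface that already has one. previous_create_info is assumed saved by the app:
//
//     VkSwapchainCreateInfoKHR ci = previous_create_info;
//     ci.imageExtent = new_extent;
//     ci.oldSwapchain = old_swapchain;
//     VkSwapchainKHR new_swapchain;
//     vkCreateSwapchainKHR(device, &ci, nullptr, &new_swapchain);
//     vkDestroySwapchainKHR(device, old_swapchain, nullptr);   // once its images finish presenting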
11279
11280VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11281                                                  const VkAllocationCallbacks *pAllocator,
11282                                                  VkSwapchainKHR *pSwapchain) {
11283    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11284    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
11285    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
11286
11287    if (PreCallValidateCreateSwapchainKHR(dev_data, pCreateInfo, surface_state, old_swapchain_state))
11288        return VK_ERROR_VALIDATION_FAILED_EXT;
11289
11290    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11291
11292    if (VK_SUCCESS == result) {
11293        std::lock_guard<std::mutex> lock(global_lock);
11294        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
11295        surface_state->swapchain = swapchain_state.get();
11296        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
11297    } else {
11298        surface_state->swapchain = nullptr;
11299    }
11300
11301    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
11302    surface_state->old_swapchain = old_swapchain_state;
11303
11304    return result;
11305}
11306
11307VKAPI_ATTR void VKAPI_CALL
11308DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11309    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11310    bool skip_call = false;
11311
11312    std::unique_lock<std::mutex> lock(global_lock);
11313    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11314    if (swapchain_data) {
11315        if (swapchain_data->images.size() > 0) {
11316            for (auto swapchain_image : swapchain_data->images) {
11317                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11318                if (image_sub != dev_data->imageSubresourceMap.end()) {
11319                    for (auto imgsubpair : image_sub->second) {
11320                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11321                        if (image_item != dev_data->imageLayoutMap.end()) {
11322                            dev_data->imageLayoutMap.erase(image_item);
11323                        }
11324                    }
11325                    dev_data->imageSubresourceMap.erase(image_sub);
11326                }
11327                skip_call |=
11328                    clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11329                dev_data->imageMap.erase(swapchain_image);
11330            }
11331        }
11332
11333        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11334        if (surface_state) {
11335            if (surface_state->swapchain == swapchain_data)
11336                surface_state->swapchain = nullptr;
11337            if (surface_state->old_swapchain == swapchain_data)
11338                surface_state->old_swapchain = nullptr;
11339        }
11340
11341        dev_data->device_extensions.swapchainMap.erase(swapchain);
11342    }
11343    lock.unlock();
11344    if (!skip_call)
11345        dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11346}
11347
11348VKAPI_ATTR VkResult VKAPI_CALL
11349GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
11350    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11351    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11352
11353    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11354        // This should never happen and is checked by param checker.
11355        if (!pCount)
11356            return result;
11357        std::lock_guard<std::mutex> lock(global_lock);
11358        const size_t count = *pCount;
11359        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11360        if (swapchain_node && !swapchain_node->images.empty()) {
11361            // TODO : Not sure I like the memcmp here, but it works
11362            const bool mismatch = (swapchain_node->images.size() != count ||
11363                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11364            if (mismatch) {
11365                // TODO: Verify against Valid Usage section of extension
11366                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11367                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11368                        "vkGetSwapchainInfoKHR(0x%" PRIx64
11369                        ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data",
11370                        (uint64_t)(swapchain));
11371            }
11372        }
11373        for (uint32_t i = 0; swapchain_node && (i < *pCount); ++i) { // guard: swapchain may be untracked
11374            IMAGE_LAYOUT_NODE image_layout_node;
11375            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11376            image_layout_node.format = swapchain_node->createInfo.imageFormat;
11377            // Add imageMap entries for each swapchain image
11378            VkImageCreateInfo image_ci = {};
11379            image_ci.mipLevels = 1;
11380            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11381            image_ci.usage = swapchain_node->createInfo.imageUsage;
11382            image_ci.format = swapchain_node->createInfo.imageFormat;
11383            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11384            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11385            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
11386            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11387            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci));
11388            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
11389            image_node->valid = false;
11390            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11391            swapchain_node->images.push_back(pSwapchainImages[i]);
11392            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11393            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11394            dev_data->imageLayoutMap[subpair] = image_layout_node;
11395            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11396        }
11397    }
11398    return result;
11399}
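
// Illustrative usage (not part of the layer): the count-then-data pattern the mismatch
// warning above assumes. Querying with a stale count is what produces mismatching data:
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());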
11400
11401VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11402    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11403    bool skip_call = false;
11404
11405    std::lock_guard<std::mutex> lock(global_lock);
11406    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11407        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11408        if (pSemaphore && !pSemaphore->signaled) {
11409            skip_call |=
11410                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11411                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11412                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11413                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11414        }
11415    }
11416
11417    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11418        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11419        if (swapchain_data) {
11420            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11421                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11422                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11423                                     "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11424                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11425            }
11426            else {
11427                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11428                auto image_node = getImageNode(dev_data, image);
11429                skip_call |= ValidateImageMemoryIsValid(dev_data, image_node, "vkQueuePresentKHR()");
11430
11431                if (image_node && !image_node->acquired) {
11432                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11433                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
11434                                         "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
11435                                         pPresentInfo->pImageIndices[i]);
11436                }
11437
11438                vector<VkImageLayout> layouts;
11439                if (FindLayouts(dev_data, image, layouts)) {
11440                    for (auto layout : layouts) {
11441                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11442                            skip_call |=
11443                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11444                                            reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
11445                                            "Images passed to present must be in layout "
11446                                            "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s",
11447                                            string_VkImageLayout(layout));
11448                        }
11449                    }
11450                }
11451            }
11452        }
11453    }
11454
11455    if (skip_call) {
11456        return VK_ERROR_VALIDATION_FAILED_EXT;
11457    }
11458
11459    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11460
11461    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11462        // Semaphore waits occur before error generation, if the call reached
11463        // the ICD. (Confirm?)
11464        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11465            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11466            if (pSemaphore) {
11467                pSemaphore->signaler.first = VK_NULL_HANDLE;
11468                pSemaphore->signaled = false;
11469            }
11470        }
11471
11472        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11473            // Note: this is imperfect, in that we can get confused about which
11474            // presents did or didn't succeed; but an app that relies on that is
11475            // just as confused itself.
11476            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11477
11478            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
11479                continue; // this present didn't actually happen.
11480
11481            // Mark the image as having been released to the WSI
11482            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11483            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11484            auto image_node = getImageNode(dev_data, image);
11485            image_node->acquired = false;
11486        }
11487
11488        // Note: even though presentation is directed to a queue, there is no
11489        // direct ordering between QP and subsequent work, so QP (and its
11490        // semaphore waits) /never/ participate in any completion proof.
11491    }
11492
11493    return result;
11494}
11495
11496VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11497                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
11498                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11499    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11500    std::unique_lock<std::mutex> lock(global_lock);
11501    VkResult result =
11502        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11503    return result;
11504}
11505
11506VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11507                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11508    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11509    bool skip_call = false;
11510
11511    std::unique_lock<std::mutex> lock(global_lock);
11512
11513    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11514        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11515                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11516                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11517                             "to determine the completion of this operation.");
11518    }
11519
11520    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11521    if (pSemaphore && pSemaphore->signaled) {
11522        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11523                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11524                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
11525    }
11526
11527    auto pFence = getFenceNode(dev_data, fence);
11528    if (pFence) {
11529        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11530    }
11531    lock.unlock();
11532
11533    if (skip_call)
11534        return VK_ERROR_VALIDATION_FAILED_EXT;
11535
11536    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11537
11538    lock.lock();
11539    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11540        if (pFence) {
11541            pFence->state = FENCE_INFLIGHT;
11542            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
11543        }
11544
11545        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11546        if (pSemaphore) {
11547            pSemaphore->signaled = true;
11548            pSemaphore->signaler.first = VK_NULL_HANDLE;
11549        }
11550
11551        // Mark the image as acquired.
11552        auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11553        auto image = swapchain_data->images[*pImageIndex];
11554        auto image_node = getImageNode(dev_data, image);
11555        image_node->acquired = true;
11556    }
11557    lock.unlock();
11558
11559    return result;
11560}
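
// Illustrative frame loop (not part of the layer) tying the acquire/present checks together:
// a semaphore or fence must be given to acquire, the image must be acquired before it is
// presented, and it must be transitioned to PRESENT_SRC_KHR first. acquire_sem and render_sem
// are assumed created by the app:
//
//     uint32_t index;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_sem, VK_NULL_HANDLE, &index);
//     // ... submit work that waits on acquire_sem, transitions the image to
//     //     VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and signals render_sem ...
//     VkPresentInfoKHR present = {};
//     present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_sem;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &index;
//     vkQueuePresentKHR(queue, &present);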
11561
11562VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11563                                                        VkPhysicalDevice *pPhysicalDevices) {
11564    bool skip_call = false;
11565    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11566    if (instance_data->instance_state) {
11567        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11568        if (NULL == pPhysicalDevices) {
11569            instance_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11570        } else {
11571            if (UNCALLED == instance_data->instance_state->vkEnumeratePhysicalDevicesState) {
11572                // Flag warning here. You can call this without having queried the count, but it may not be
11573                // robust on platforms with multiple physical devices.
11574                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11575                                    0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11576                                    "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11577                                    "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11578            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11579            else if (instance_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
11580                // Having actual count match count from app is not a requirement, so this can be a warning
11581                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11582                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11583                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11584                                    "supported by this instance is %u.",
11585                                    *pPhysicalDeviceCount, instance_data->instance_state->physical_devices_count);
11586            }
11587            instance_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11588        }
11589        if (skip_call) {
11590            return VK_ERROR_VALIDATION_FAILED_EXT;
11591        }
11592        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11593        if (NULL == pPhysicalDevices) {
11594            instance_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
11595        } else if (result == VK_SUCCESS) { // Save physical devices
11596            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11597                auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11598                phys_device_state.phys_device = pPhysicalDevices[i];
11599                // Init actual features for each physical device
11600                instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11601            }
11602        }
11603        return result;
11604    } else {
11605        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
11606                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
11607                (uint64_t)instance);
11608    }
11609    return VK_ERROR_VALIDATION_FAILED_EXT;
11610}
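
// Illustrative usage (not part of the layer): the call sequence the state machine above
// expects (QUERY_COUNT first, then QUERY_DETAILS with the returned count):
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, nullptr);
//     std::vector<VkPhysicalDevice> devices(count);
//     vkEnumeratePhysicalDevices(instance, &count, devices.data());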
11611
11612VKAPI_ATTR void VKAPI_CALL
11613GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11614    VkQueueFamilyProperties *pQueueFamilyProperties) {
11615    bool skip_call = false;
11616    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11617    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11618    if (physical_device_state) {
11619        if (!pQueueFamilyProperties) {
11620            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11621        }
11622        else {
11623            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
11624            // get count
11625            if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11626                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11627                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11628                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
11629                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
11630                    "NULL pQueueFamilyProperties to query pCount.");
11631            }
11632            // Then verify that pCount that is passed in on second call matches what was returned
11633            if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
11634
11635                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11636                // provide as warning
11637                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11638                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11639                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
11640                    "supported by this physicalDevice is %u.",
11641                    *pCount, physical_device_state->queueFamilyPropertiesCount);
11642            }
11643            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11644        }
11645        if (skip_call) {
11646            return;
11647        }
11648        instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
11649        if (!pQueueFamilyProperties) {
11650            physical_device_state->queueFamilyPropertiesCount = *pCount;
11651        }
11652        else { // Save queue family properties
11653            if (physical_device_state->queue_family_properties.size() < *pCount)
11654                physical_device_state->queue_family_properties.resize(*pCount);
11655            for (uint32_t i = 0; i < *pCount; i++) {
11656                physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
11657            }
11658        }
11659    }
11660    else {
11661        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
11662            __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
11663            "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
11664            (uint64_t)physicalDevice);
11665    }
11666}
11667
11668template<typename TCreateInfo, typename FPtr>
11669static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo,
11670                              VkAllocationCallbacks const *pAllocator, VkSurfaceKHR *pSurface,
11671                              FPtr fptr)
11672{
11673    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11674
11675    // Call down the call chain:
11676    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11677
11678    if (result == VK_SUCCESS) {
11679        std::unique_lock<std::mutex> lock(global_lock);
11680        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11681        lock.unlock();
11682    }
11683
11684    return result;
11685}
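
// Illustrative call (not part of the layer's API surface): each platform entry point below
// forwards to this template with a pointer-to-member of the instance dispatch table, so the
// shared bookkeeping lives in one place, e.g.:
//
//     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface,
//                          &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);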
11686
11687VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11688    bool skip_call = false;
11689    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11690    std::unique_lock<std::mutex> lock(global_lock);
11691    auto surface_state = getSurfaceState(instance_data, surface);
11692
11693    if (surface_state) {
11694        // TODO: track swapchains created from this surface.
11695        instance_data->surface_map.erase(surface);
11696    }
11697    lock.unlock();
11698
11699    if (!skip_call) {
11700        // Call down the call chain:
11701        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11702    }
11703}
11704
11705#ifdef VK_USE_PLATFORM_ANDROID_KHR
11706VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11707                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11708    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11709}
11710#endif // VK_USE_PLATFORM_ANDROID_KHR
11711
11712#ifdef VK_USE_PLATFORM_MIR_KHR
11713VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11714                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11715    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11716}
11717#endif // VK_USE_PLATFORM_MIR_KHR
11718
11719#ifdef VK_USE_PLATFORM_WAYLAND_KHR
11720VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11721                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11722    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11723}
11724#endif // VK_USE_PLATFORM_WAYLAND_KHR
11725
11726#ifdef VK_USE_PLATFORM_WIN32_KHR
11727VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11728                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11729    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11730}
11731#endif // VK_USE_PLATFORM_WIN32_KHR
11732
11733#ifdef VK_USE_PLATFORM_XCB_KHR
11734VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11735                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11736    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11737}
11738#endif // VK_USE_PLATFORM_XCB_KHR
11739
11740#ifdef VK_USE_PLATFORM_XLIB_KHR
11741VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11742                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11743    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11744}
11745#endif // VK_USE_PLATFORM_XLIB_KHR
11746
11747
11748VKAPI_ATTR VkResult VKAPI_CALL
11749CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11750                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
11751    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11752    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11753    if (VK_SUCCESS == res) {
11754        std::lock_guard<std::mutex> lock(global_lock);
11755        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11756    }
11757    return res;
11758}
11759
11760VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
11761                                                         VkDebugReportCallbackEXT msgCallback,
11762                                                         const VkAllocationCallbacks *pAllocator) {
11763    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11764    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11765    std::lock_guard<std::mutex> lock(global_lock);
11766    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
11767}
11768
11769VKAPI_ATTR void VKAPI_CALL
11770DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
11771                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11772    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11773    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11774}
11775
11776VKAPI_ATTR VkResult VKAPI_CALL
11777EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11778    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11779}
11780
11781VKAPI_ATTR VkResult VKAPI_CALL
11782EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
11783    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11784}
11785
11786VKAPI_ATTR VkResult VKAPI_CALL
11787EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
11788    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11789        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11790
11791    return VK_ERROR_LAYER_NOT_PRESENT;
11792}
11793
11794VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
11795                                                                  const char *pLayerName, uint32_t *pCount,
11796                                                                  VkExtensionProperties *pProperties) {
11797    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11798        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
11799
11800    assert(physicalDevice);
11801
11802    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11803    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
11804}
11805
11806static PFN_vkVoidFunction
11807intercept_core_instance_command(const char *name);
11808
11809static PFN_vkVoidFunction
11810intercept_core_device_command(const char *name);
11811
11812static PFN_vkVoidFunction
11813intercept_khr_swapchain_command(const char *name, VkDevice dev);
11814
11815static PFN_vkVoidFunction
11816intercept_khr_surface_command(const char *name, VkInstance instance);
11817
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    auto &table = dev_data->dispatch_table;
    if (!table.GetDeviceProcAddr)
        return nullptr;
    return table.GetDeviceProcAddr(dev, funcName);
}

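// Instance-level queries resolve in a fixed order: core instance commands,
// core device commands (the loader may request device entrypoints here too),
// swapchain commands (passing VK_NULL_HANDLE, so no per-device extension
// check applies), surface commands, the debug-report entrypoints, and
// finally the next GetInstanceProcAddr down the chain.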
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (!proc)
        proc = intercept_khr_surface_command(funcName, instance);
    if (proc)
        return proc;

    assert(instance);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
    if (proc)
        return proc;

    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr)
        return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

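// Name-to-function table for the instance-level commands this layer
// intercepts.  A linear scan is sufficient: the table is short and
// Get*ProcAddr is expected to be called once per entrypoint at startup,
// not on a hot path.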
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

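// Name-to-function table for the device-level commands this layer intercepts.
// An entrypoint missing from this table is passed straight through to the
// driver and receives no validation, so every intercepted device command must
// have a matching entry here.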
static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

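// VK_KHR_swapchain / VK_KHR_display_swapchain interceptions.  With a concrete
// device, the lookup honors which WSI extensions that device enabled; with a
// null device (an instance-level query), names are matched unconditionally.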
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

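// VK_KHR_surface and the platform-specific surface-creation extensions.  Each
// entry carries a pointer-to-member flag recording whether the corresponding
// instance extension was enabled at vkCreateInstance time; entrypoints of
// disabled extensions deliberately resolve to null.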
static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
            &instance_layer_data::androidSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
            &instance_layer_data::mirSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
            &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
            &instance_layer_data::win32SurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
            &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
            &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XLIB_KHR
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
            &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
                return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0: just wrappers, since this library contains only one layer
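// Under interface v0 the loader discovers a layer's entrypoints purely by
// symbol name: it looks up these exported vk* functions directly and then
// bootstraps everything else through vkGetInstanceProcAddr and
// vkGetDeviceProcAddr.  The wrappers below simply re-expose the
// core_validation implementations at global scope.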

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}