core_validation.cpp revision 03ea795b83fdf0099594808a1a57064dea7f02a1
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;
    unique_ptr<INSTANCE_STATE> instance_state;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // All queues under given device
    // Vector indices correspond to queueFamilyIndex
    vector<unique_ptr<VkQueueFamilyProperties>> queue_family_properties;
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
    VkPhysicalDeviceFeatures physical_device_features;
    unique_ptr<PHYSICAL_DEVICE_STATE> physical_device_state;

    layer_data()
        : instance_state(nullptr), report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
          device_extensions(), device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{}, physical_device_features{},
          physical_device_state(nullptr){};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};
template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
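
// Illustrative sketch (not part of the layer): an application-side layer list that
// satisfies the ordering check above. The variable names here are hypothetical.
//
//   const char *enabled_layers[] = {
//       "VK_LAYER_LUNARG_core_validation",  // must be activated first
//       "VK_LAYER_GOOGLE_unique_objects",   // may only follow core_validation
//   };
//   VkInstanceCreateInfo info = {};
//   info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//   info.enabledLayerCount = 2;
//   info.ppEnabledLayerNames = enabled_layers;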

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
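
// Illustrative sketch (not part of the layer): walking a module with the iterator above.
// In SPIR-V, word 0 of each instruction packs the word count in the high 16 bits and the
// opcode in the low 16 bits, which is exactly what len() and opcode() decode.
//
//   for (auto insn : *module) {                       // module: shader_module const *
//       if (insn.opcode() == spv::OpEntryPoint) {
//           auto name = (char const *)&insn.word(3);  // entry point name, nul-terminated
//       }
//   }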

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
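
// Illustrative sketch (not part of the layer): begin() skips the 5-word SPIR-V header
// (magic, version, generator, id bound, reserved), and get_def() jumps straight to an
// id's defining instruction via def_index.
//
//   auto def = module->get_def(type_id);  // module: shader_module const *, type_id: unsigned
//   if (def != module->end() && def.opcode() == spv::OpTypePointer) {
//       auto pointee = def.word(3);       // <id> of the pointed-to type
//   }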

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}
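
// Illustrative sketch (not part of the layer): the getters above assume the caller holds
// global_lock, since they read maps that other threads may mutate. A hypothetical call
// site would look like:
//
//   std::unique_lock<std::mutex> lock(global_lock);
//   auto image_node = getImageNode(dev_data, image);
//   if (image_node) { /* ... validate against image_node->createInfo ... */ }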

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, char const *ty_str, char const *func_name,
                                 char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                                " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}
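
// Illustrative sketch (not part of the layer): strict requires every desired bit, while
// non-strict accepts any overlap. For example (hypothetical values):
//
//   VkFlags actual  = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
//   VkFlags desired = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//   // non-strict: (actual & desired) != 0       -> passes (TRANSFER_SRC overlaps)
//   // strict:     (actual & desired) == desired -> fails  (TRANSFER_DST is missing)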

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}
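
// Illustrative sketch (not part of the layer): a hypothetical call from a vkCmdCopyBuffer
// hook, requiring the source buffer to have been created with TRANSFER_SRC usage.
//
//   skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
//                                         "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");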

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_node
//  If mem is special swapchain key, then verify that image_node valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_node->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_node to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_node->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_NODE *sampler_node) {
    sampler_node->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_node->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node) {
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
    }
    // Now update cb binding for image
    img_node->cb_bindings.insert(cb_node);
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    auto image_node = getImageNode(dev_data, view_state->create_info.image);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    // Add bindings for image within imageView
    if (image_node) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_node);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);
}
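
// Illustrative sketch (not part of the layer): these binding helpers maintain a
// bidirectional graph -- the object records which command buffers reference it, and the
// command buffer records which objects and memory it touches. A hypothetical
// vkCmdBindVertexBuffers hook would call:
//
//   for (uint32_t i = 0; i < bindingCount; ++i) {
//       auto buff_node = getBufferNode(dev_data, pBuffers[i]);
//       if (buff_node)
//           AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
//   }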

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skip_call = false;
    size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
    size_t objRefCount = pMemObjInfo->obj_bindings.size();

    if ((pMemObjInfo->command_buffer_bindings.size()) != 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                            " references",
                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->command_buffer_bindings.size() > 0) {
        for (auto cb : pMemObjInfo->command_buffer_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->command_buffer_bindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->obj_bindings.size() > 0) {
        for (auto obj : pMemObjInfo->obj_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->obj_bindings.clear();
    }
    return skip_call;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        // TODO: Verify against Valid Use section
        // Clear any CB bindings for completed CBs
        //   TODO : Is there a better place to do this?

        assert(pInfo->object != VK_NULL_HANDLE);
        // clear_cmd_buf_and_mem_references removes elements from
        // pInfo->command_buffer_bindings -- this copy not needed in c++14,
        // and probably not needed in practice in c++11
        auto bindings = pInfo->command_buffer_bindings;
        for (auto cb : bindings) {
            if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                clear_cmd_buf_and_mem_references(dev_data, cb);
            }
        }

        // Now verify that no references to this mem obj remain and remove bindings
        if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
            skip_call |= reportMemReferencesAndCleanUp(dev_data, pInfo);
        }
        // Delete mem obj info
        dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
    } else if (VK_NULL_HANDLE != mem) {
        // The request is to free an invalid, non-zero handle
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
                            reinterpret_cast<uint64_t &>(mem));
    }
    return skip_call;
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        if (0 == image_node->mem) {
            result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                             reinterpret_cast<const uint64_t &>(image_node->image), __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                             "%s: VkImage object 0x%" PRIxLEAST64 " used without first calling vkBindImageMemory.", api_name,
                             reinterpret_cast<const uint64_t &>(image_node->image));
        }
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        if (0 == buffer_node->mem) {
            result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             reinterpret_cast<const uint64_t &>(buffer_node->buffer), __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                             "%s: VkBuffer object 0x%" PRIxLEAST64 " used without first calling vkBindBufferMemory.", api_name,
                             reinterpret_cast<const uint64_t &>(buffer_node->buffer));
        }
    }
    return result;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                            const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                     "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                     apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if a validation error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                   const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skip_call;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        // Guard against unknown handles so a stale handle reports an error below instead of crashing
        auto img_node = getImageNode(dev_data, VkImage(handle));
        if (img_node)
            *mem = img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(dev_data, VkBuffer(handle));
        if (buff_node)
            *mem = buff_node->mem;
        break;
    }
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->command_buffer_bindings.size());
        if (mem_info->command_buffer_bindings.size() > 0) {
            for (auto cb : mem_info->command_buffer_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
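
/* Illustrative note (not part of the layer): type instructions carry their result <id>
 * in word 1 (binary layout [len|opcode, result, operands...]), while constants, variables,
 * and functions carry their result type in word 1 and their result <id> in word 2
 * (binary layout [len|opcode, type, result, operands...]) -- hence the word(1) vs word(2)
 * split above.
 */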

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
            considering here, OR -- specialize on the fly now.
            */
        return 1;
    }

    return value.word(3);
}
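
/* Illustrative note (not part of the layer): for a 32-bit OpConstant the literal lives in
 * word 3 -- e.g. the binary form of "%c = OpConstant %uint 16" is the four words
 * [len|OpConstant, <type id>, <result id>, 16], so get_constant_value returns 16.
 */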

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}
1231
1232
1233static std::string describe_type(shader_module const *src, unsigned type) {
1234    std::ostringstream ss;
1235    describe_type_inner(ss, src, type);
1236    return ss.str();
1237}
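
/* Illustrative example (not part of the original source): for a module containing
 *
 *   %float   = OpTypeFloat 32
 *   %v4float = OpTypeVector %float 4
 *   %ptr     = OpTypePointer Output %v4float
 *
 * describe_type(src, <id of %ptr>) yields "ptr to output vec4 of float32",
 * assembled by the recursive walk in describe_type_inner above. */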
1238
1239
1240static bool is_narrow_numeric_type(spirv_inst_iter type) {
1242    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
1243        return false;
1244    return type.word(2) < 64;
1245}
1246
1247
1248static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1249    /* walk two type trees together, and complain about differences */
1250    auto a_insn = a->get_def(a_type);
1251    auto b_insn = b->get_def(b_type);
1252    assert(a_insn != a->end());
1253    assert(b_insn != b->end());
1254
1255    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1256        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1257    }
1258
1259    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1260        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1261        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1262    }
1263
1264    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1265        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1266    }
1267
1268    if (a_insn.opcode() != b_insn.opcode()) {
1269        return false;
1270    }
1271
1272    if (a_insn.opcode() == spv::OpTypePointer) {
1273        /* match on pointee type. storage class is expected to differ */
1274        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1275    }
1276
1277    if (a_arrayed || b_arrayed) {
1278        /* if we haven't resolved array-of-verts by here, we're not going to. */
1279        return false;
1280    }
1281
1282    switch (a_insn.opcode()) {
1283    case spv::OpTypeBool:
1284        return true;
1285    case spv::OpTypeInt:
1286        /* match on width, signedness */
1287        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1288    case spv::OpTypeFloat:
1289        /* match on width */
1290        return a_insn.word(2) == b_insn.word(2);
1291    case spv::OpTypeVector:
1292        /* match on element type, count. */
1293        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1294            return false;
1295        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1296            return a_insn.word(3) >= b_insn.word(3);
1297        } else {
1299            return a_insn.word(3) == b_insn.word(3);
1300        }
1301    case spv::OpTypeMatrix:
1302        /* match on element type, count. */
1303        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1304    case spv::OpTypeArray:
1305        /* match on element type, count -- arrays that agree on both have the same layout. we don't get
1306         * here if b_arrayed. This differs from vector & matrix types in that the array size is the id of
1307         * a constant instruction, not a literal within OpTypeArray. */
1308        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1309               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1310    case spv::OpTypeStruct:
1311        /* match on all element types */
1312        {
1313            if (a_insn.len() != b_insn.len()) {
1314                return false; /* structs cannot match if member counts differ */
1315            }
1316
1317            for (unsigned i = 2; i < a_insn.len(); i++) {
1318                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1319                    return false;
1320                }
1321            }
1322
1323            return true;
1324        }
1325    default:
1326        /* remaining types are CLisms, or may not appear in the interfaces we
1327         * are interested in. Just claim no match.
1328         */
1329        return false;
1330    }
1331}
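
/* Worked example (illustrative, not from the original source): with relaxed == true,
 * a producer output of vec4-of-float32 matches a consumer input of plain float32 --
 * the OpTypeVector case above peels a's vector when b is a narrow numeric type --
 * and a vec4 output also satisfies a vec2 input of the same element type, since the
 * relaxed component-count check is '>=' rather than '=='. With relaxed == false,
 * both would be reported as mismatches. */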
1332
1333static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1334    auto it = map.find(id);
1335    if (it == map.end())
1336        return def;
1337    else
1338        return it->second;
1339}
1340
1341static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1342    auto insn = src->get_def(type);
1343    assert(insn != src->end());
1344
1345    switch (insn.opcode()) {
1346    case spv::OpTypePointer:
1347        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1348         * we're never actually passing pointers around. */
1349        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1350    case spv::OpTypeArray:
1351        if (strip_array_level) {
1352            return get_locations_consumed_by_type(src, insn.word(2), false);
1353        } else {
1354            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1355        }
1356    case spv::OpTypeMatrix:
1357        /* num locations is the dimension * element size */
1358        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1359    case spv::OpTypeVector: {
1360        auto scalar_type = src->get_def(insn.word(2));
1361        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1362            scalar_type.word(2) : 32;
1363
1364        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
1365         * types require two. */
1366        return (bit_width * insn.word(3) + 127) / 128;
1367    }
1368    default:
1369        /* everything else is just 1. */
1370        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1371         * multiple locations. */
1372        return 1;
1373
1374    }
1375}
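
/* Worked examples (illustrative): a mat4 of float32 consumes 4 locations -- 4 columns,
 * each column vector fitting within one 128-bit location -- while a dvec3 (3-component
 * vector of float64) consumes (64 * 3 + 127) / 128 = 2. An arrayed input such as
 * float[5] consumes 5 locations unless strip_array_level peels the outer per-vertex
 * dimension first. */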
1376
1377static unsigned get_locations_consumed_by_format(VkFormat format) {
1378    switch (format) {
1379    case VK_FORMAT_R64G64B64A64_SFLOAT:
1380    case VK_FORMAT_R64G64B64A64_SINT:
1381    case VK_FORMAT_R64G64B64A64_UINT:
1382    case VK_FORMAT_R64G64B64_SFLOAT:
1383    case VK_FORMAT_R64G64B64_SINT:
1384    case VK_FORMAT_R64G64B64_UINT:
1385        return 2;
1386    default:
1387        return 1;
1388    }
1389}
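
/* This is the pipeline-side mirror of get_locations_consumed_by_type: e.g.
 * VK_FORMAT_R64G64B64A64_SFLOAT feeds a dvec4 attribute, and both computations
 * agree on (64 * 4 + 127) / 128 = 2 locations. */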
1390
1391typedef std::pair<unsigned, unsigned> location_t;
1392typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1393
1394struct interface_var {
1395    uint32_t id;
1396    uint32_t type_id;
1397    uint32_t offset;
1398    bool is_patch;
1399    bool is_block_member;
1400    /* TODO: collect the name, too? Isn't required to be present. */
1401};
1402
1403struct shader_stage_attributes {
1404    char const *const name;
1405    bool arrayed_input;
1406    bool arrayed_output;
1407};
1408
1409static shader_stage_attributes shader_stage_attribs[] = {
1410    {"vertex shader", false, false},
1411    {"tessellation control shader", true, true},
1412    {"tessellation evaluation shader", true, false},
1413    {"geometry shader", true, false},
1414    {"fragment shader", false, false},
1415};
1416
1417static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1418    while (true) {
1419
1420        if (def.opcode() == spv::OpTypePointer) {
1421            def = src->get_def(def.word(3));
1422        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1423            def = src->get_def(def.word(2));
1424            is_array_of_verts = false;
1425        } else if (def.opcode() == spv::OpTypeStruct) {
1426            return def;
1427        } else {
1428            return src->end();
1429        }
1430    }
1431}
1432
1433static void collect_interface_block_members(shader_module const *src,
1434                                            std::map<location_t, interface_var> *out,
1435                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1436                                            uint32_t id, uint32_t type_id, bool is_patch) {
1437    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1438    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1439    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1440        /* this isn't an interface block. */
1441        return;
1442    }
1443
1444    std::unordered_map<unsigned, unsigned> member_components;
1445
1446    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1447    for (auto insn : *src) {
1448        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1449            unsigned member_index = insn.word(2);
1450
1451            if (insn.word(3) == spv::DecorationComponent) {
1452                unsigned component = insn.word(4);
1453                member_components[member_index] = component;
1454            }
1455        }
1456    }
1457
1458    /* Second pass -- produce the output, from Location decorations */
1459    for (auto insn : *src) {
1460        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1461            unsigned member_index = insn.word(2);
1462            unsigned member_type_id = type.word(2 + member_index);
1463
1464            if (insn.word(3) == spv::DecorationLocation) {
1465                unsigned location = insn.word(4);
1466                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1467                auto component_it = member_components.find(member_index);
1468                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1469
1470                for (unsigned int offset = 0; offset < num_locations; offset++) {
1471                    interface_var v;
1472                    v.id = id;
1473                    /* TODO: member index in interface_var too? */
1474                    v.type_id = member_type_id;
1475                    v.offset = offset;
1476                    v.is_patch = is_patch;
1477                    v.is_block_member = true;
1478                    (*out)[std::make_pair(location + offset, component)] = v;
1479                }
1480            }
1481        }
1482    }
1483}
1484
1485static std::map<location_t, interface_var> collect_interface_by_location(
1486        shader_module const *src, spirv_inst_iter entrypoint,
1487        spv::StorageClass sinterface, bool is_array_of_verts) {
1488
1489    std::unordered_map<unsigned, unsigned> var_locations;
1490    std::unordered_map<unsigned, unsigned> var_builtins;
1491    std::unordered_map<unsigned, unsigned> var_components;
1492    std::unordered_map<unsigned, unsigned> blocks;
1493    std::unordered_map<unsigned, unsigned> var_patch;
1494
1495    for (auto insn : *src) {
1496
1497        /* We consider two interface models: SSO rendezvous-by-location, and
1498         * builtins. Complain about anything that fits neither model.
1499         */
1500        if (insn.opcode() == spv::OpDecorate) {
1501            if (insn.word(2) == spv::DecorationLocation) {
1502                var_locations[insn.word(1)] = insn.word(3);
1503            }
1504
1505            if (insn.word(2) == spv::DecorationBuiltIn) {
1506                var_builtins[insn.word(1)] = insn.word(3);
1507            }
1508
1509            if (insn.word(2) == spv::DecorationComponent) {
1510                var_components[insn.word(1)] = insn.word(3);
1511            }
1512
1513            if (insn.word(2) == spv::DecorationBlock) {
1514                blocks[insn.word(1)] = 1;
1515            }
1516
1517            if (insn.word(2) == spv::DecorationPatch) {
1518                var_patch[insn.word(1)] = 1;
1519            }
1520        }
1521    }
1522
1523    /* TODO: handle grouped decorations */
1524    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1525     * have the same location, and we DON'T want to clobber. */
1526
1527    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1528       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1529       the word to determine which word contains the terminator. */
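    /* Worked example (illustrative): for the common entrypoint name "main", the
       name spans two words -- 'm','a','i','n' packed little-endian (top byte 'n',
       nonzero), then an all-zero padding word -- so the loop below stops on the
       second word and the interface ids begin at word 5. */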
1530    uint32_t word = 3;
1531    while (entrypoint.word(word) & 0xff000000u) {
1532        ++word;
1533    }
1534    ++word;
1535
1536    std::map<location_t, interface_var> out;
1537
1538    for (; word < entrypoint.len(); word++) {
1539        auto insn = src->get_def(entrypoint.word(word));
1540        assert(insn != src->end());
1541        assert(insn.opcode() == spv::OpVariable);
1542
1543        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1544            unsigned id = insn.word(2);
1545            unsigned type = insn.word(1);
1546
1547            int location = value_or_default(var_locations, id, -1);
1548            int builtin = value_or_default(var_builtins, id, -1);
1549            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1550            bool is_patch = var_patch.find(id) != var_patch.end();
1551
1552            /* All variables and interface block members in the Input or Output storage classes
1553             * must be decorated with either a builtin or an explicit location.
1554             *
1555             * TODO: integrate the interface block support here. For now, don't complain --
1556             * a valid SPIRV module will only hit this path for the interface block case, as the
1557             * individual members of the type are decorated, rather than variable declarations.
1558             */
1559
1560            if (location != -1) {
1561                /* A user-defined interface variable, with a location. Where a variable
1562                 * occupied multiple locations, emit one result for each. */
1563                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1564                for (unsigned int offset = 0; offset < num_locations; offset++) {
1565                    interface_var v;
1566                    v.id = id;
1567                    v.type_id = type;
1568                    v.offset = offset;
1569                    v.is_patch = is_patch;
1570                    v.is_block_member = false;
1571                    out[std::make_pair(location + offset, component)] = v;
1572                }
1573            } else if (builtin == -1) {
1574                /* An interface block instance */
1575                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1576            }
1577        }
1578    }
1579
1580    return out;
1581}
1582
1583static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1584        debug_report_data *report_data, shader_module const *src,
1585        std::unordered_set<uint32_t> const &accessible_ids) {
1586
1587    std::vector<std::pair<uint32_t, interface_var>> out;
1588
1589    for (auto insn : *src) {
1590        if (insn.opcode() == spv::OpDecorate) {
1591            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1592                auto attachment_index = insn.word(3);
1593                auto id = insn.word(1);
1594
1595                if (accessible_ids.count(id)) {
1596                    auto def = src->get_def(id);
1597                    assert(def != src->end());
1598
1599                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1600                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1601                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1602                            interface_var v;
1603                            v.id = id;
1604                            v.type_id = def.word(1);
1605                            v.offset = offset;
1606                            v.is_patch = false;
1607                            v.is_block_member = false;
1608                            out.emplace_back(attachment_index + offset, v);
1609                        }
1610                    }
1611                }
1612            }
1613        }
1614    }
1615
1616    return out;
1617}
1618
1619static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1620        debug_report_data *report_data, shader_module const *src,
1621        std::unordered_set<uint32_t> const &accessible_ids) {
1622
1623    std::unordered_map<unsigned, unsigned> var_sets;
1624    std::unordered_map<unsigned, unsigned> var_bindings;
1625
1626    for (auto insn : *src) {
1627        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1628         * DecorationDescriptorSet and DecorationBinding.
1629         */
1630        if (insn.opcode() == spv::OpDecorate) {
1631            if (insn.word(2) == spv::DecorationDescriptorSet) {
1632                var_sets[insn.word(1)] = insn.word(3);
1633            }
1634
1635            if (insn.word(2) == spv::DecorationBinding) {
1636                var_bindings[insn.word(1)] = insn.word(3);
1637            }
1638        }
1639    }
1640
1641    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1642
1643    for (auto id : accessible_ids) {
1644        auto insn = src->get_def(id);
1645        assert(insn != src->end());
1646
1647        if (insn.opcode() == spv::OpVariable &&
1648            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1649            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1650            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1651
1652            interface_var v;
1653            v.id = insn.word(2);
1654            v.type_id = insn.word(1);
1655            v.offset = 0;
1656            v.is_patch = false;
1657            v.is_block_member = false;
1658            out.emplace_back(std::make_pair(set, binding), v);
1659        }
1660    }
1661
1662    return out;
1663}
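
/* Illustrative example: a GLSL declaration like
 *
 *   layout(set = 1, binding = 3) uniform sampler2D tex;
 *
 * becomes an OpVariable in UniformConstant storage decorated with
 * DescriptorSet = 1 and Binding = 3, so the walk above reports it under
 * descriptor slot (1, 3); a variable missing either decoration falls back
 * to set 0 / binding 0 via value_or_default. */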
1664
1665static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1666                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1667                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1668                                              shader_stage_attributes const *consumer_stage) {
1669    bool pass = true;
1670
1671    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1672    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1673
1674    auto a_it = outputs.begin();
1675    auto b_it = inputs.begin();
1676
1677    /* maps sorted by key (location); walk them together to find mismatches */
1678    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1679        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1680        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1681        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1682        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1683
1684        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1685            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1686                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1687                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1688                        a_first.second, consumer_stage->name)) {
1689                pass = false;
1690            }
1691            a_it++;
1692        } else if (a_at_end || a_first > b_first) {
1693            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1694                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1695                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1696                        producer_stage->name)) {
1697                pass = false;
1698            }
1699            b_it++;
1700        } else {
1701            // subtleties of arrayed interfaces:
1702            // - if is_patch, then the member is not arrayed, even though the interface may be.
1703            // - if is_block_member, then the extra array level of an arrayed interface is not
1704            //   expressed in the member type -- it's expressed in the block type.
1705            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1706                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1707                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1708                             true)) {
1709                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1710                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1711                            a_first.first, a_first.second,
1712                            describe_type(producer, a_it->second.type_id).c_str(),
1713                            describe_type(consumer, b_it->second.type_id).c_str())) {
1714                    pass = false;
1715                }
1716            }
1717            if (a_it->second.is_patch != b_it->second.is_patch) {
1718                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1719                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1720                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1721                            "per-%s in %s stage", a_first.first, a_first.second,
1722                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1723                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1724                    pass = false;
1725                }
1726            }
1727            a_it++;
1728            b_it++;
1729        }
1730    }
1731
1732    return pass;
1733}
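
/* Worked example (illustrative): if the producer writes locations {0, 2} and the
 * consumer reads locations {1, 2}, the merge walk above emits a performance warning
 * for output 0 (written but never consumed), an error for input 1 (consumed but
 * never written), and runs the relaxed type and patch-decoration checks on the
 * shared location 2. */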
1734
1735enum FORMAT_TYPE {
1736    FORMAT_TYPE_UNDEFINED,
1737    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1738    FORMAT_TYPE_SINT,
1739    FORMAT_TYPE_UINT,
1740};
1741
1742static unsigned get_format_type(VkFormat fmt) {
1743    switch (fmt) {
1744    case VK_FORMAT_UNDEFINED:
1745        return FORMAT_TYPE_UNDEFINED;
1746    case VK_FORMAT_R8_SINT:
1747    case VK_FORMAT_R8G8_SINT:
1748    case VK_FORMAT_R8G8B8_SINT:
1749    case VK_FORMAT_R8G8B8A8_SINT:
1750    case VK_FORMAT_R16_SINT:
1751    case VK_FORMAT_R16G16_SINT:
1752    case VK_FORMAT_R16G16B16_SINT:
1753    case VK_FORMAT_R16G16B16A16_SINT:
1754    case VK_FORMAT_R32_SINT:
1755    case VK_FORMAT_R32G32_SINT:
1756    case VK_FORMAT_R32G32B32_SINT:
1757    case VK_FORMAT_R32G32B32A32_SINT:
1758    case VK_FORMAT_R64_SINT:
1759    case VK_FORMAT_R64G64_SINT:
1760    case VK_FORMAT_R64G64B64_SINT:
1761    case VK_FORMAT_R64G64B64A64_SINT:
1762    case VK_FORMAT_B8G8R8_SINT:
1763    case VK_FORMAT_B8G8R8A8_SINT:
1764    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1765    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1766    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1767        return FORMAT_TYPE_SINT;
1768    case VK_FORMAT_R8_UINT:
1769    case VK_FORMAT_R8G8_UINT:
1770    case VK_FORMAT_R8G8B8_UINT:
1771    case VK_FORMAT_R8G8B8A8_UINT:
1772    case VK_FORMAT_R16_UINT:
1773    case VK_FORMAT_R16G16_UINT:
1774    case VK_FORMAT_R16G16B16_UINT:
1775    case VK_FORMAT_R16G16B16A16_UINT:
1776    case VK_FORMAT_R32_UINT:
1777    case VK_FORMAT_R32G32_UINT:
1778    case VK_FORMAT_R32G32B32_UINT:
1779    case VK_FORMAT_R32G32B32A32_UINT:
1780    case VK_FORMAT_R64_UINT:
1781    case VK_FORMAT_R64G64_UINT:
1782    case VK_FORMAT_R64G64B64_UINT:
1783    case VK_FORMAT_R64G64B64A64_UINT:
1784    case VK_FORMAT_B8G8R8_UINT:
1785    case VK_FORMAT_B8G8R8A8_UINT:
1786    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1787    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1788    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1789        return FORMAT_TYPE_UINT;
1790    default:
1791        return FORMAT_TYPE_FLOAT;
1792    }
1793}
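
/* E.g. VK_FORMAT_R32_SINT maps to FORMAT_TYPE_SINT while VK_FORMAT_R8G8B8A8_UNORM
 * falls through to FORMAT_TYPE_FLOAT, since normalized formats are read as
 * floating point in the shader. */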
1794
1795/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1796 * for comparison to a VkFormat's characterization above. */
1797static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1798    auto insn = src->get_def(type);
1799    assert(insn != src->end());
1800
1801    switch (insn.opcode()) {
1802    case spv::OpTypeInt:
1803        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1804    case spv::OpTypeFloat:
1805        return FORMAT_TYPE_FLOAT;
1806    case spv::OpTypeVector:
1807        return get_fundamental_type(src, insn.word(2));
1808    case spv::OpTypeMatrix:
1809        return get_fundamental_type(src, insn.word(2));
1810    case spv::OpTypeArray:
1811        return get_fundamental_type(src, insn.word(2));
1812    case spv::OpTypePointer:
1813        return get_fundamental_type(src, insn.word(3));
1814    case spv::OpTypeImage:
1815        return get_fundamental_type(src, insn.word(2));
1816
1817    default:
1818        return FORMAT_TYPE_UNDEFINED;
1819    }
1820}
1821
1822static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1823    uint32_t bit_pos = u_ffs(stage);
1824    return bit_pos - 1;
1825}
1826
1827static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1828    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1829     * each binding should be specified only once.
1830     */
1831    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1832    bool pass = true;
1833
1834    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1835        auto desc = &vi->pVertexBindingDescriptions[i];
1836        auto &binding = bindings[desc->binding];
1837        if (binding) {
1838            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1839                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1840                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1841                pass = false;
1842            }
1843        } else {
1844            binding = desc;
1845        }
1846    }
1847
1848    return pass;
1849}
1850
1851static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1852                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1853    bool pass = true;
1854
1855    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1856
1857    /* Build index by location */
1858    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1859    if (vi) {
1860        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1861            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1862            for (auto j = 0u; j < num_locations; j++) {
1863                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1864            }
1865        }
1866    }
1867
1868    auto it_a = attribs.begin();
1869    auto it_b = inputs.begin();
1870    bool used = false;
1871
1872    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1873        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1874        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1875        auto a_first = a_at_end ? 0 : it_a->first;
1876        auto b_first = b_at_end ? 0 : it_b->first.first;
1877        if (!a_at_end && (b_at_end || a_first < b_first)) {
1878            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1879                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1880                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1881                pass = false;
1882            }
1883            used = false;
1884            it_a++;
1885        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1886            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1887                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1888                        b_first)) {
1889                pass = false;
1890            }
1891            it_b++;
1892        } else {
1893            unsigned attrib_type = get_format_type(it_a->second->format);
1894            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1895
1896            /* type checking */
1897            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1898                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1899                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1900                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1901                            string_VkFormat(it_a->second->format), a_first,
1902                            describe_type(vs, it_b->second.type_id).c_str())) {
1903                    pass = false;
1904                }
1905            }
1906
1907            /* OK! */
1908            used = true;
1909            it_b++;
1910        }
1911    }
1912
1913    return pass;
1914}
1915
1916static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1917                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1918                                                    uint32_t subpass_index) {
1919    std::map<uint32_t, VkFormat> color_attachments;
1920    auto subpass = rpci->pSubpasses[subpass_index];
1921    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1922        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1923        if (attachment == VK_ATTACHMENT_UNUSED)
1924            continue;
1925        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1926            color_attachments[i] = rpci->pAttachments[attachment].format;
1927        }
1928    }
1929
1930    bool pass = true;
1931
1932    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
1933
1934    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1935
1936    auto it_a = outputs.begin();
1937    auto it_b = color_attachments.begin();
1938
1939    /* Walk attachment list and outputs together */
1940
1941    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1942        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1943        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1944
1945        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1946            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1947                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1948                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1949                pass = false;
1950            }
1951            it_a++;
1952        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1953            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1954                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1955                pass = false;
1956            }
1957            it_b++;
1958        } else {
1959            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1960            unsigned att_type = get_format_type(it_b->second);
1961
1962            /* type checking */
1963            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1964                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1965                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1966                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1967                            string_VkFormat(it_b->second),
1968                            describe_type(fs, it_a->second.type_id).c_str())) {
1969                    pass = false;
1970                }
1971            }
1972
1973            /* OK! */
1974            it_a++;
1975            it_b++;
1976        }
1977    }
1978
1979    return pass;
1980}
1981
1982/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1983 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1984 * for example.
1985 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1986 *  - NOT the shader input/output interfaces.
1987 *
1988 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1989 * converting parts of this to be generated from the machine-readable spec instead.
1990 */
1991static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1992    std::unordered_set<uint32_t> ids;
1993    std::unordered_set<uint32_t> worklist;
1994    worklist.insert(entrypoint.word(2));
1995
1996    while (!worklist.empty()) {
1997        auto id_iter = worklist.begin();
1998        auto id = *id_iter;
1999        worklist.erase(id_iter);
2000
2001        auto insn = src->get_def(id);
2002        if (insn == src->end()) {
2003            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
2004             * across all kinds of things here that we may not care about. */
2005            continue;
2006        }
2007
2008        /* try to add to the output set */
2009        if (!ids.insert(id).second) {
2010            continue; /* if we already saw this id, we don't want to walk it again. */
2011        }
2012
2013        switch (insn.opcode()) {
2014        case spv::OpFunction:
2015            /* scan whole body of the function, enlisting anything interesting */
2016            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
2017                switch (insn.opcode()) {
2018                case spv::OpLoad:
2019                case spv::OpAtomicLoad:
2020                case spv::OpAtomicExchange:
2021                case spv::OpAtomicCompareExchange:
2022                case spv::OpAtomicCompareExchangeWeak:
2023                case spv::OpAtomicIIncrement:
2024                case spv::OpAtomicIDecrement:
2025                case spv::OpAtomicIAdd:
2026                case spv::OpAtomicISub:
2027                case spv::OpAtomicSMin:
2028                case spv::OpAtomicUMin:
2029                case spv::OpAtomicSMax:
2030                case spv::OpAtomicUMax:
2031                case spv::OpAtomicAnd:
2032                case spv::OpAtomicOr:
2033                case spv::OpAtomicXor:
2034                    worklist.insert(insn.word(3)); /* ptr */
2035                    break;
2036                case spv::OpStore:
2037                case spv::OpAtomicStore:
2038                    worklist.insert(insn.word(1)); /* ptr */
2039                    break;
2040                case spv::OpAccessChain:
2041                case spv::OpInBoundsAccessChain:
2042                    worklist.insert(insn.word(3)); /* base ptr */
2043                    break;
2044                case spv::OpSampledImage:
2045                case spv::OpImageSampleImplicitLod:
2046                case spv::OpImageSampleExplicitLod:
2047                case spv::OpImageSampleDrefImplicitLod:
2048                case spv::OpImageSampleDrefExplicitLod:
2049                case spv::OpImageSampleProjImplicitLod:
2050                case spv::OpImageSampleProjExplicitLod:
2051                case spv::OpImageSampleProjDrefImplicitLod:
2052                case spv::OpImageSampleProjDrefExplicitLod:
2053                case spv::OpImageFetch:
2054                case spv::OpImageGather:
2055                case spv::OpImageDrefGather:
2056                case spv::OpImageRead:
2057                case spv::OpImage:
2058                case spv::OpImageQueryFormat:
2059                case spv::OpImageQueryOrder:
2060                case spv::OpImageQuerySizeLod:
2061                case spv::OpImageQuerySize:
2062                case spv::OpImageQueryLod:
2063                case spv::OpImageQueryLevels:
2064                case spv::OpImageQuerySamples:
2065                case spv::OpImageSparseSampleImplicitLod:
2066                case spv::OpImageSparseSampleExplicitLod:
2067                case spv::OpImageSparseSampleDrefImplicitLod:
2068                case spv::OpImageSparseSampleDrefExplicitLod:
2069                case spv::OpImageSparseSampleProjImplicitLod:
2070                case spv::OpImageSparseSampleProjExplicitLod:
2071                case spv::OpImageSparseSampleProjDrefImplicitLod:
2072                case spv::OpImageSparseSampleProjDrefExplicitLod:
2073                case spv::OpImageSparseFetch:
2074                case spv::OpImageSparseGather:
2075                case spv::OpImageSparseDrefGather:
2076                case spv::OpImageTexelPointer:
2077                    worklist.insert(insn.word(3)); /* image or sampled image */
2078                    break;
2079                case spv::OpImageWrite:
2080                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
2081                    break;
2082                case spv::OpFunctionCall:
2083                    for (uint32_t i = 3; i < insn.len(); i++) {
2084                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2085                    }
2086                    break;
2087
2088                case spv::OpExtInst:
2089                    for (uint32_t i = 5; i < insn.len(); i++) {
2090                        worklist.insert(insn.word(i)); /* operands to ext inst */
2091                    }
2092                    break;
2093                }
2094            }
2095            break;
2096        }
2097    }
2098
2099    return ids;
2100}
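
/* Minimal usage sketch (illustrative; compiled out). It shows how the analyses in
 * this file chain off mark_accessible_ids; `module` and `entry` are hypothetical
 * arguments, not state owned by this layer. */
#if 0
static void example_report_descriptor_uses(debug_report_data *report_data, shader_module const *module, spirv_inst_iter entry) {
    /* collect every id reachable from the entrypoint's static call tree */
    auto accessible = mark_accessible_ids(module, entry);
    /* then reduce that to the (set, binding) slots the shader actually touches */
    auto uses = collect_interface_by_descriptor_slot(report_data, module, accessible);
    for (auto const &use : uses) {
        LOGCONSOLE("shader uses descriptor (set=%u, binding=%u)", use.first.first, use.first.second);
    }
}
#endif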
2101
2102static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2103                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2104                                                          shader_module const *src, spirv_inst_iter type,
2105                                                          VkShaderStageFlagBits stage) {
2106    bool pass = true;
2107
2108    /* strip off ptrs etc */
2109    type = get_struct_type(src, type, false);
2110    assert(type != src->end());
2111
2112    /* validate directly off the offsets. this isn't quite correct for arrays
2113     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2114     * sizes */
2115    for (auto insn : *src) {
2116        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2117
2118            if (insn.word(3) == spv::DecorationOffset) {
2119                unsigned offset = insn.word(4);
2120                auto size = 4; /* bytes; TODO: calculate this based on the type */
2121
2122                bool found_range = false;
2123                for (auto const &range : *push_constant_ranges) {
2124                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2125                        found_range = true;
2126
2127                        if ((range.stageFlags & stage) == 0) {
2128                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2129                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2130                                        "Push constant range covering variable starting at "
2131                                        "offset %u not accessible from stage %s",
2132                                        offset, string_VkShaderStageFlagBits(stage))) {
2133                                pass = false;
2134                            }
2135                        }
2136
2137                        break;
2138                    }
2139                }
2140
2141                if (!found_range) {
2142                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2143                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2144                                "Push constant range covering variable starting at "
2145                                "offset %u not declared in layout",
2146                                offset)) {
2147                        pass = false;
2148                    }
2149                }
2150            }
2151        }
2152    }
2153
2154    return pass;
2155}
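
/* Worked example (illustrative): given a single VkPushConstantRange of
 * { stageFlags = VK_SHADER_STAGE_VERTEX_BIT, offset = 0, size = 16 }, a member
 * decorated Offset 12 passes the containment test above (0 <= 12 && 0 + 16 >= 12 + 4),
 * a member at Offset 16 fails it and triggers SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE,
 * and the member at Offset 12 referenced from a fragment shader would trigger
 * SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE. */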
2156
2157static bool validate_push_constant_usage(debug_report_data *report_data,
2158                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2159                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
2160    bool pass = true;
2161
2162    for (auto id : accessible_ids) {
2163        auto def_insn = src->get_def(id);
2164        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2165            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2166                                                                  src->get_def(def_insn.word(1)), stage);
2167        }
2168    }
2169
2170    return pass;
2171}
2172
2173// For given pipelineLayout verify that the set_layout_node at slot.first
2174//  has the requested binding at slot.second and return ptr to that binding
2175static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2176
2177    if (!pipelineLayout)
2178        return nullptr;
2179
2180    if (slot.first >= pipelineLayout->set_layouts.size())
2181        return nullptr;
2182
2183    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2184}
2185
2186// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2187
2188static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2189
2190// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2191//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2192//   to that same cmd buffer by separate thread are not changing state from underneath us
2193// Track the last cmd buffer touched by this thread
2194
2195static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2196    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2197        if (pCB->drawCount[i])
2198            return true;
2199    }
2200    return false;
2201}
2202
2203// Check object status for selected flag state
2204static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2205                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2206    if (!(pNode->status & status_mask)) {
2207        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2208                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2209                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2210    }
2211    return false;
2212}
2213
2214// Retrieve pipeline node ptr for given pipeline object
2215static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2216    auto it = my_data->pipelineMap.find(pipeline);
2217    if (it == my_data->pipelineMap.end()) {
2218        return nullptr;
2219    }
2220    return it->second;
2221}
2222
2223static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2224    auto it = my_data->renderPassMap.find(renderpass);
2225    if (it == my_data->renderPassMap.end()) {
2226        return nullptr;
2227    }
2228    return it->second;
2229}
2230
2231static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
2232    auto it = my_data->frameBufferMap.find(framebuffer);
2233    if (it == my_data->frameBufferMap.end()) {
2234        return nullptr;
2235    }
2236    return it->second.get();
2237}
2238
2239cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2240    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2241    if (it == my_data->descriptorSetLayoutMap.end()) {
2242        return nullptr;
2243    }
2244    return it->second;
2245}
2246
2247static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2248    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2249    if (it == my_data->pipelineLayoutMap.end()) {
2250        return nullptr;
2251    }
2252    return &it->second;
2253}
2254
2255// Return true if for a given PSO, the given state enum is dynamic, else return false
2256static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2257    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2258        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2259            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2260                return true;
2261        }
2262    }
2263    return false;
2264}
2265
2266// Validate state stored as flags at time of draw call
2267static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2268    bool result = false;
2269    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2270        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2271         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2272        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2273                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2274    }
2275    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2276        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2277        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2278                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2279    }
2280    if (pPipe->blendConstantsEnabled) {
2281        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2282                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2283    }
2284    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2285        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2286        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2287                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2288    }
2289    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2290        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2291        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2292                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2293        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2294                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2295        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2296                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2297    }
2298    if (indexedDraw) {
2299        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2300                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2301                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2302    }
2303    return result;
2304}
2305
2306// Verify attachment reference compatibility according to spec
2307//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2308//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2309//   to make sure that format and samples counts match.
2310//  If not, they are not compatible.
2311static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2312                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2313                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2314                                             const VkAttachmentDescription *pSecondaryAttachments) {
2315    // Check potential NULL cases first to avoid nullptr issues later
2316    if (pPrimary == nullptr) {
2317        if (pSecondary == nullptr) {
2318            return true;
2319        }
2320        return false;
2321    } else if (pSecondary == nullptr) {
2322        return false;
2323    }
2324    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2325        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2326            return true;
2327    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2328        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2329            return true;
2330    } else { // Format and sample count must match
2331        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2332            return true;
2333        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2334            return false;
2335        }
2336        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2337             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2338            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2339             pSecondaryAttachments[pSecondary[index].attachment].samples))
2340            return true;
2341    }
2342    // Format and sample counts didn't match
2343    return false;
2344}
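
/* Worked example (illustrative): comparing a primary subpass with two color
 * references against a secondary subpass with only one, index 1 runs off the end
 * of the secondary array and is compatible only if the primary reference at index 1
 * is VK_ATTACHMENT_UNUSED; where both arrays have the index, the referenced
 * VkAttachmentDescriptions must agree on format and sample count. */
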
2345// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2346// For the given primary and secondary RenderPassCreateInfos, verify that they're compatible
2347static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2348                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2349    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2350        stringstream errorStr;
2351        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2352                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2353        errorMsg = errorStr.str();
2354        return false;
2355    }
2356    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2358        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2359        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2360        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2361        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2362        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2363            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2364                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2365                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2366                stringstream errorStr;
2367                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2368                errorMsg = errorStr.str();
2369                return false;
2370            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2371                                                         primaryColorCount, primaryRPCI->pAttachments,
2372                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2373                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2374                stringstream errorStr;
2375                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2376                errorMsg = errorStr.str();
2377                return false;
2378            }
2379        }
2380
2381        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2382                                              1, primaryRPCI->pAttachments,
2383                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2384                                              1, secondaryRPCI->pAttachments)) {
2385            stringstream errorStr;
2386            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2387            errorMsg = errorStr.str();
2388            return false;
2389        }
2390
2391        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2392        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2393        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2394        for (uint32_t i = 0; i < inputMax; ++i) {
2395            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2396                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2397                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2398                stringstream errorStr;
2399                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2400                errorMsg = errorStr.str();
2401                return false;
2402            }
2403        }
2404    }
2405    return true;
2406}
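// Usage sketch for the check above (variable names here are hypothetical): a secondary command
// buffer recorded against one render pass may only execute inside another if their create infos
// are compatible as defined above.
//
//   std::string err;
//   if (!verify_renderpass_compatibility(dev_data, primary_rpci, secondary_rpci, err)) {
//       // err receives a message naming the first mismatched subpass or attachment
//   }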
2407
2408// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2409// pipelineLayout[layoutIndex]
2410static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2411                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2412                                            string &errorMsg) {
2413    auto num_sets = pipeline_layout->set_layouts.size();
2414    if (layoutIndex >= num_sets) {
2415        stringstream errorStr;
2416        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2417                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind a set at index "
2418                 << layoutIndex;
2419        errorMsg = errorStr.str();
2420        return false;
2421    }
2422    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2423    return pSet->IsCompatible(layout_node, &errorMsg);
2424}
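// Example of the out-of-range failure mode above (a sketch with made-up numbers): binding a set
// at index 2 against a VkPipelineLayout created with only two set layouts fails before any
// per-binding compatibility is even checked:
//
//   pipeline_layout->set_layouts.size() == 2                        // sets 0-1 are valid
//   verify_set_layout_compatibility(dev, set, layout, 2, err);      // -> false, err describes the range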
2425
2426// Validate that data for each specialization entry is fully contained within the buffer.
2427static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2428    bool pass = true;
2429
2430    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2431
2432    if (spec) {
2433        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2434            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2435                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2436                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2437                            "Specialization entry %u (for constant id %u) references memory outside provided "
2438                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2439                            " bytes provided)",
2440                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2441                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2442
2443                    pass = false;
2444                }
2445            }
2446        }
2447    }
2448
2449    return pass;
2450}
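// A minimal sketch of input this check rejects (the function and variable names below are
// illustrative, not part of the layer): the single map entry ends at byte 12, but only
// 8 bytes of specialization data are provided.
#if 0  // example only
static void example_bad_specialization_info() {
    static const int32_t spec_data[2] = {0, 1};     // dataSize will be 8 bytes
    VkSpecializationMapEntry entry = {};
    entry.constantID = 7;
    entry.offset = 4;
    entry.size = 8;                                 // offset + size == 12 > dataSize: flagged above
    VkSpecializationInfo spec = {};
    spec.mapEntryCount = 1;
    spec.pMapEntries = &entry;
    spec.dataSize = sizeof(spec_data);              // 8
    spec.pData = spec_data;
    (void)spec;
}
#endif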
2451
2452static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2453                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2454    auto type = module->get_def(type_id);
2455
2456    descriptor_count = 1;
2457
2458    /* Strip off any array or pointer types. Where we remove an array level,
2459     * scale the descriptor count by that dimension. */
2460    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2461        if (type.opcode() == spv::OpTypeArray) {
2462            descriptor_count *= get_constant_value(module, type.word(3));
2463            type = module->get_def(type.word(2));
2464        }
2465        else {
2466            type = module->get_def(type.word(3));
2467        }
2468    }
2469
2470    switch (type.opcode()) {
2471    case spv::OpTypeStruct: {
2472        for (auto insn : *module) {
2473            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2474                if (insn.word(2) == spv::DecorationBlock) {
2475                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2476                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2477                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2478                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2479                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2480                }
2481            }
2482        }
2483
2484        /* Invalid */
2485        return false;
2486    }
2487
2488    case spv::OpTypeSampler:
2489        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2490            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2491
2492    case spv::OpTypeSampledImage:
2493        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2494            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2495             * doesn't really have a sampler, and a texel buffer descriptor
2496             * doesn't really provide one. Allow this slight mismatch.
2497             */
2498            auto image_type = module->get_def(type.word(2));
2499            auto dim = image_type.word(3);
2500            auto sampled = image_type.word(7);
2501            return dim == spv::DimBuffer && sampled == 1;
2502        }
2503        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2504
2505    case spv::OpTypeImage: {
2506        /* Many descriptor types can back an image type -- the right one depends
2507         * on the dimension and whether the image will be used with a sampler.
2508         * SPIR-V for Vulkan requires that Sampled be 1 or 2 -- leaving the
2509         * decision to runtime (Sampled == 0) is not allowed.
2510         */
2511        auto dim = type.word(3);
2512        auto sampled = type.word(7);
2513
2514        if (dim == spv::DimSubpassData) {
2515            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2516        } else if (dim == spv::DimBuffer) {
2517            if (sampled == 1) {
2518                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2519            } else {
2520                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2521            }
2522        } else if (sampled == 1) {
2523            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2524                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2525        } else {
2526            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2527        }
2528    }
2529
2530    /* We shouldn't really see any other junk types -- but if we do, they're
2531     * a mismatch.
2532     */
2533    default:
2534        return false; /* Mismatch */
2535    }
2536}
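// Rough GLSL-to-descriptor mapping implied by the cases above (informal, for orientation):
//   layout(set=S, binding=B) uniform Ubo { ... };        // Block decoration  -> UNIFORM_BUFFER[_DYNAMIC]
//   layout(set=S, binding=B) buffer  Ssbo { ... };       // BufferBlock       -> STORAGE_BUFFER[_DYNAMIC]
//   layout(set=S, binding=B) uniform sampler2D tex;      // OpTypeSampledImage -> COMBINED_IMAGE_SAMPLER
//   layout(set=S, binding=B) uniform texture2D img;      // OpTypeImage, Sampled=1 -> SAMPLED_IMAGE (or combined)
//   layout(set=S, binding=B, rgba8) uniform image2D st;  // OpTypeImage, Sampled=2 -> STORAGE_IMAGE
//   layout(set=S, binding=B) uniform subpassInput in0;   // DimSubpassData    -> INPUT_ATTACHMENT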
2537
2538static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2539    if (!feature) {
2540        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2541                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2542                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2543                    "enabled on the device",
2544                    feature_name)) {
2545            return false;
2546        }
2547    }
2548
2549    return true;
2550}
2551
2552static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2553                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2554    bool pass = true;
2555
2557    for (auto insn : *src) {
2558        if (insn.opcode() == spv::OpCapability) {
2559            switch (insn.word(1)) {
2560            case spv::CapabilityMatrix:
2561            case spv::CapabilityShader:
2562            case spv::CapabilityInputAttachment:
2563            case spv::CapabilitySampled1D:
2564            case spv::CapabilityImage1D:
2565            case spv::CapabilitySampledBuffer:
2566            case spv::CapabilityImageBuffer:
2567            case spv::CapabilityImageQuery:
2568            case spv::CapabilityDerivativeControl:
2569                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2570                break;
2571
2572            case spv::CapabilityGeometry:
2573                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2574                break;
2575
2576            case spv::CapabilityTessellation:
2577                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2578                break;
2579
2580            case spv::CapabilityFloat64:
2581                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2582                break;
2583
2584            case spv::CapabilityInt64:
2585                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2586                break;
2587
2588            case spv::CapabilityTessellationPointSize:
2589            case spv::CapabilityGeometryPointSize:
2590                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2591                                        "shaderTessellationAndGeometryPointSize");
2592                break;
2593
2594            case spv::CapabilityImageGatherExtended:
2595                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2596                break;
2597
2598            case spv::CapabilityStorageImageMultisample:
2599                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2600                break;
2601
2602            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2603                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2604                                        "shaderUniformBufferArrayDynamicIndexing");
2605                break;
2606
2607            case spv::CapabilitySampledImageArrayDynamicIndexing:
2608                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2609                                        "shaderSampledImageArrayDynamicIndexing");
2610                break;
2611
2612            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2613                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2614                                        "shaderStorageBufferArrayDynamicIndexing");
2615                break;
2616
2617            case spv::CapabilityStorageImageArrayDynamicIndexing:
2618                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2619                                        "shaderStorageImageArrayDynamicIndexing");
2620                break;
2621
2622            case spv::CapabilityClipDistance:
2623                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2624                break;
2625
2626            case spv::CapabilityCullDistance:
2627                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2628                break;
2629
2630            case spv::CapabilityImageCubeArray:
2631                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2632                break;
2633
2634            case spv::CapabilitySampleRateShading:
2635                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2636                break;
2637
2638            case spv::CapabilitySparseResidency:
2639                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2640                break;
2641
2642            case spv::CapabilityMinLod:
2643                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2644                break;
2645
2646            case spv::CapabilitySampledCubeArray:
2647                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2648                break;
2649
2650            case spv::CapabilityImageMSArray:
2651                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2652                break;
2653
2654            case spv::CapabilityStorageImageExtendedFormats:
2655                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2656                                        "shaderStorageImageExtendedFormats");
2657                break;
2658
2659            case spv::CapabilityInterpolationFunction:
2660                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2661                break;
2662
2663            case spv::CapabilityStorageImageReadWithoutFormat:
2664                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2665                                        "shaderStorageImageReadWithoutFormat");
2666                break;
2667
2668            case spv::CapabilityStorageImageWriteWithoutFormat:
2669                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2670                                        "shaderStorageImageWriteWithoutFormat");
2671                break;
2672
2673            case spv::CapabilityMultiViewport:
2674                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2675                break;
2676
2677            default:
2678                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2679                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2680                            "Shader declares capability %u, which is not supported in Vulkan.",
2681                            insn.word(1)))
2682                    pass = false;
2683                break;
2684            }
2685        }
2686    }
2687
2688    return pass;
2689}
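// The feature bits consulted above are whatever the application enabled at vkCreateDevice()
// time. A minimal sketch (illustrative and incomplete create info, example only) of enabling
// a feature so that a shader declaring OpCapability Geometry passes this check:
#if 0  // example only
static void example_enable_geometry_shader(VkPhysicalDevice gpu, VkDevice *out_device) {
    VkPhysicalDeviceFeatures features = {};
    features.geometryShader = VK_TRUE;              // satisfies spv::CapabilityGeometry above
    VkDeviceCreateInfo device_ci = {};
    device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_ci.pEnabledFeatures = &features;
    // ... queue create info omitted for brevity ...
    vkCreateDevice(gpu, &device_ci, nullptr, out_device);
}
#endif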
2690
2691
2692static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2693    auto type = module->get_def(type_id);
2694
2695    while (true) {
2696        switch (type.opcode()) {
2697        case spv::OpTypeArray:
2698        case spv::OpTypeSampledImage:
2699            type = module->get_def(type.word(2));
2700            break;
2701        case spv::OpTypePointer:
2702            type = module->get_def(type.word(3));
2703            break;
2704        case spv::OpTypeImage: {
2705            auto dim = type.word(3);
2706            auto arrayed = type.word(5);
2707            auto msaa = type.word(6);
2708
2709            switch (dim) {
2710            case spv::Dim1D:
2711                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2712            case spv::Dim2D:
2713                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2714                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2715            case spv::Dim3D:
2716                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2717            case spv::DimCube:
2718                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2719            case spv::DimSubpassData:
2720                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2721            default:  // buffer, etc.
2722                return 0;
2723            }
2724        }
2725        default:
2726            return 0;
2727        }
2728    }
2729}
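// Examples of the mapping above (informal): a shader variable of type
//   sampler2DArray  (Dim2D, arrayed=1, msaa=0) -> DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY
//   sampler2DMS     (Dim2D, arrayed=0, msaa=1) -> DESCRIPTOR_REQ_MULTI_SAMPLE  | DESCRIPTOR_REQ_VIEW_TYPE_2D
//   samplerCube     (DimCube, arrayed=0)       -> DESCRIPTOR_REQ_VIEW_TYPE_CUBE
// Buffer-backed images (DimBuffer) place no view-type requirement and yield 0.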
2730
2731
2732static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2733                                           VkPipelineShaderStageCreateInfo const *pStage,
2734                                           PIPELINE_NODE *pipeline,
2735                                           shader_module **out_module,
2736                                           spirv_inst_iter *out_entrypoint,
2737                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2738                                           std::unordered_map<VkShaderModule,
2739                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2740    bool pass = true;
2741    auto module_it = shaderModuleMap.find(pStage->module);
2742    auto module = *out_module = module_it->second.get();
2743
2744    /* find the entrypoint */
2745    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2746    if (entrypoint == module->end()) {
2747        log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2748                __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2749                "No entrypoint found named `%s` for stage %s", pStage->pName,
2750                string_VkShaderStageFlagBits(pStage->stage));
2751        // No point continuing beyond here: any analysis without an entrypoint would walk garbage,
2752        return false;
2753    }
2754
2755    /* validate shader capabilities against enabled device features */
2756    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2757
2758    /* mark accessible ids */
2759    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2760
2761    /* validate descriptor set layout against what the entrypoint actually uses */
2762    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2763
2764    auto pipelineLayout = pipeline->pipeline_layout;
2765
2766    pass &= validate_specialization_offsets(report_data, pStage);
2767    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2768
2769    /* validate descriptor use */
2770    for (auto use : descriptor_uses) {
2771        // While validating shaders capture which slots are used by the pipeline
2772        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2773        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2774
2775        /* verify given pipelineLayout has requested setLayout with requested binding */
2776        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2777        unsigned required_descriptor_count;
2778
2779        if (!binding) {
2780            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2781                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2782                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2783                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2784                pass = false;
2785            }
2786        } else if (~binding->stageFlags & pStage->stage) {
2787            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2788                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2789                        "Shader uses descriptor slot %u.%u (used "
2790                        "as type `%s`) but descriptor not "
2791                        "accessible from stage %s",
2792                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2793                        string_VkShaderStageFlagBits(pStage->stage))) {
2794                pass = false;
2795            }
2796        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2797                                          /*out*/ required_descriptor_count)) {
2798            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2799                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2800                                                                       "%u.%u (used as type `%s`) but "
2801                                                                       "descriptor of type %s",
2802                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2803                        string_VkDescriptorType(binding->descriptorType))) {
2804                pass = false;
2805            }
2806        } else if (binding->descriptorCount < required_descriptor_count) {
2807            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2808                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2809                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2810                        required_descriptor_count, use.first.first, use.first.second,
2811                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2812                pass = false;
2813            }
2814        }
2815    }
2816
2817    /* validate use of input attachments against subpass structure */
2818    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2819        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2820
2821        auto rpci = pipeline->render_pass_ci.ptr();
2822        auto subpass = pipeline->graphicsPipelineCI.subpass;
2823
2824        for (auto use : input_attachment_uses) {
2825            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2826            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2827                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2828
2829            if (index == VK_ATTACHMENT_UNUSED) {
2830                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2831                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2832                            "Shader consumes input attachment index %u but it is not provided in the subpass",
2833                            use.first)) {
2834                    pass = false;
2835                }
2836            }
2837            else if (get_format_type(rpci->pAttachments[index].format) !=
2838                    get_fundamental_type(module, use.second.type_id)) {
2839                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2840                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2841                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2842                            use.first, string_VkFormat(rpci->pAttachments[index].format),
2843                            describe_type(module, use.second.type_id).c_str())) {
2844                    pass = false;
2845                }
2846            }
2847        }
2848    }
2849
2850    return pass;
2851}
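// Typical mismatch this function reports (a sketch; the set/binding numbers are made up):
// the shader declares
//     layout(set = 0, binding = 1) uniform sampler2D tex;
// while the pipeline layout declares binding 0.1 as VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, which
// produces SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH ("Type mismatch on descriptor slot 0.1 ...").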
2852
2853
2854// Validate the shaders used by the given pipeline, and record the descriptor slots
2855//  that are actually used by the pipeline into pPipeline->active_slots
2856static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2857                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2858                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2859    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2860    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2861    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2862
2863    shader_module *shaders[5];
2864    memset(shaders, 0, sizeof(shaders));
2865    spirv_inst_iter entrypoints[5];
2866    memset(entrypoints, 0, sizeof(entrypoints));
2867    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2868    bool pass = true;
2869
2870    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2871        auto pStage = &pCreateInfo->pStages[i];
2872        auto stage_id = get_shader_stage_id(pStage->stage);
2873        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2874                                               &shaders[stage_id], &entrypoints[stage_id],
2875                                               enabledFeatures, shaderModuleMap);
2876    }
2877
2878    // if the shader stages are no good individually, cross-stage validation is pointless.
2879    if (!pass)
2880        return false;
2881
2882    vi = pCreateInfo->pVertexInputState;
2883
2884    if (vi) {
2885        pass &= validate_vi_consistency(report_data, vi);
2886    }
2887
2888    if (shaders[vertex_stage]) {
2889        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2890    }
2891
2892    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2893    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2894
2895    while (!shaders[producer] && producer != fragment_stage) {
2896        producer++;
2897        consumer++;
2898    }
2899
2900    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2901        assert(shaders[producer]);
2902        if (shaders[consumer]) {
2903            pass &= validate_interface_between_stages(report_data,
2904                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2905                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2906
2907            producer = consumer;
2908        }
2909    }
2910
2911    if (shaders[fragment_stage]) {
2912        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2913                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2914    }
2915
2916    return pass;
2917}
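// Note on the producer/consumer walk above: missing intermediate stages are skipped, so for a
// pipeline with only VS and FS the single interface checked is VS outputs -> FS inputs; with
// VS+GS+FS the checks are VS->GS and then GS->FS. The fragment stage is never a producer.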
2918
2919static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2920                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2921    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2922
2923    shader_module *module;
2924    spirv_inst_iter entrypoint;
2925
2926    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2927                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2928}
2929// Return Set node ptr for specified set or else NULL
2930cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2931    auto set_it = my_data->setMap.find(set);
2932    if (set_it == my_data->setMap.end()) {
2933        return NULL;
2934    }
2935    return set_it->second;
2936}
2937// For the given command buffer, verify and update the state for activeSetBindingsPairs
2938//  This includes:
2939//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2940//     To be valid, the dynamic offset combined with the offset and range from its
2941//     descriptor update must not overflow the size of its buffer being updated
2942//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2943//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2944static bool validate_and_update_drawtime_descriptor_state(
2945    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2946    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
2947        &activeSetBindingsPairs,
2948    const char *function) {
2949    bool result = false;
2950    for (auto set_bindings_pair : activeSetBindingsPairs) {
2951        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2952        std::string err_str;
2953        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2954                                         &err_str)) {
2955            // Report error here
2956            auto set = set_node->GetSet();
2957            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2958                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2959                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
2960                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
2961        }
2962        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2963    }
2964    return result;
2965}
2966
2967// For given pipeline, return its number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2968static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2969    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2970        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2971        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2972    }
2973    return VK_SAMPLE_COUNT_1_BIT;
2974}
2975
2976static void list_bits(std::ostream& s, uint32_t bits) {
2977    for (int i = 0; i < 32 && bits; i++) {
2978        if (bits & (1 << i)) {
2979            s << i;
2980            bits &= ~(1 << i);
2981            if (bits) {
2982                s << ",";
2983            }
2984        }
2985    }
2986}
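// Usage sketch: list_bits() renders a bitmask as a comma-separated list of set-bit indices,
// e.g. list_bits(ss, 0x0000000B) appends "0,1,3". Used below for viewport/scissor masks.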
2987
2988// Validate draw-time state related to the PSO
2989static bool validatePipelineDrawtimeState(layer_data const *my_data,
2990                                          LAST_BOUND_STATE const &state,
2991                                          const GLOBAL_CB_NODE *pCB,
2992                                          PIPELINE_NODE const *pPipeline) {
2993    bool skip_call = false;
2994
2995    // Verify Vtx binding
2996    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2997        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2998            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2999            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
3000                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
3001                skip_call |= log_msg(
3002                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3003                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3004                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
3005                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
3006                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
3007                    (uint64_t)state.pipeline_node->pipeline, vertex_binding, i, vertex_binding);
3008            }
3009        }
3010    } else {
3011        if (!pCB->currentDrawData.buffers.empty()) {
3012            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
3013                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3014                                 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
3015                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
3016                                 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_node->pipeline);
3017        }
3018    }
3019    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3020    // Skip check if rasterization is disabled or there is no viewport.
3021    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
3022         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
3023        pPipeline->graphicsPipelineCI.pViewportState) {
3024        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3025        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3026
3027        if (dynViewport) {
3028            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
3029            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
3030            if (missingViewportMask) {
3031                std::stringstream ss;
3032                ss << "Dynamic viewport(s) ";
3033                list_bits(ss, missingViewportMask);
3034                ss << " are used by PSO, but were not provided via calls to vkCmdSetViewport().";
3035                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3036                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3037                                     "%s", ss.str().c_str());
3038            }
3039        }
3040
3041        if (dynScissor) {
3042            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
3043            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
3044            if (missingScissorMask) {
3045                std::stringstream ss;
3046                ss << "Dynamic scissor(s) ";
3047                list_bits(ss, missingScissorMask);
3048                ss << " are used by PSO, but were not provided via calls to vkCmdSetScissor().";
3049                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3050                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3051                                     "%s", ss.str().c_str());
3052            }
3053        }
3054    }
3055
3056    // Verify that any MSAA request in PSO matches sample# in bound FB
3057    // Skip the check if rasterization is disabled.
3058    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3059        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3060        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
3061        if (pCB->activeRenderPass) {
3062            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
3063            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
3064            uint32_t i;
3065
3066            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3067            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3068                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
3069                skip_call |=
3070                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3071                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3072                                "Render pass subpass %u: blend state attachment count %u does not match the subpass "
3073                                "color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
3074                                "must be the same at draw-time.",
3075                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
3076                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3077            }
3078
3079            unsigned subpass_num_samples = 0;
3080
3081            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
3082                auto attachment = subpass_desc->pColorAttachments[i].attachment;
3083                if (attachment != VK_ATTACHMENT_UNUSED)
3084                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3085            }
3086
3087            if (subpass_desc->pDepthStencilAttachment &&
3088                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3089                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
3090                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3091            }
3092
3093            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
3094                skip_call |=
3095                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3096                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3097                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
3098                                ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
3099                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
3100                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
3101            }
3102        } else {
3103            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3104                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3105                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
3106                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3107        }
3108    }
3109    // Verify that PSO creation renderPass is compatible with active renderPass
3110    if (pCB->activeRenderPass) {
3111        std::string err_string;
3112        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
3113            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->pCreateInfo, pPipeline->render_pass_ci.ptr(),
3114                                             err_string)) {
3115            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
3116            skip_call |=
3117                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3118                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3119                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
3120                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
3121                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
3122                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
3123        }
3124    }
3125    // TODO : Add more checks here
3126
3127    return skip_call;
3128}
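// Worked example for the dynamic-state masks above (made-up counts): a PSO with
// viewportCount == 3 requires requiredViewportsMask == 0b111; if only vkCmdSetViewport() for
// viewport 0 was recorded (viewportMask == 0b001), then missingViewportMask == 0b110 and the
// error message lists "Dynamic viewport(s) 1,2 ...".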
3129
3130// Validate overall state at the time of a draw call
3131static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
3132                                           const VkPipelineBindPoint bindPoint, const char *function) {
3133    bool result = false;
3134    auto const &state = cb_node->lastBound[bindPoint];
3135    PIPELINE_NODE *pPipe = state.pipeline_node;
3136    if (nullptr == pPipe) {
3137        result |= log_msg(
3138            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3139            DRAWSTATE_INVALID_PIPELINE, "DS",
3140            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3141        // Early return: every check below dereferences pPipe, so none of them can run without a
3142        // pipeline, even when the message above is filtered out.
3143        return result;
3144    }
3145    // First check flag states
3146    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3147        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);
3148
3149    // Now complete other state checks
3150    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3151        string errorString;
3152        auto pipeline_layout = pPipe->pipeline_layout;
3153
3154        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
3155        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
3156            activeSetBindingsPairs;
3157        for (auto & setBindingPair : pPipe->active_slots) {
3158            uint32_t setIndex = setBindingPair.first;
3159            // If valid set is not bound throw an error
3160            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3161                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3162                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3163                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3164                                  setIndex);
3165            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3166                                                        errorString)) {
3167                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3168                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3169                result |=
3170                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3171                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3172                            "VkDescriptorSet (0x%" PRIxLEAST64
3173                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3174                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3175                            errorString.c_str());
3176            } else { // Valid set is bound and layout compatible, validate that it's updated
3177                // Pull the set node
3178                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
3179                // Gather active bindings
3180                std::unordered_set<uint32_t> bindings;
3181                for (auto binding : setBindingPair.second) {
3182                    bindings.insert(binding.first);
3183                }
3184                // Bind this set and its active descriptor resources to the command buffer
3185                pSet->BindCommandBuffer(cb_node, bindings);
3186                // Save vector of all active sets to verify dynamicOffsets below
3187                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
3188                // Make sure set has been updated if it has no immutable samplers
3189                //  If it has immutable samplers, we'll flag error later as needed depending on binding
3190                if (!pSet->IsUpdated()) {
3191                    for (auto binding : bindings) {
3192                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
3193                            result |= log_msg(
3194                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3195                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3196                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
3197                                "this will result in undefined behavior.",
3198                                (uint64_t)pSet->GetSet());
3199                        }
3200                    }
3201                }
3202            }
3203        }
3204        // For given active slots, verify any dynamic descriptors and record updated images & buffers
3205        result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
3206    }
3207
3208    // Check general pipeline state that needs to be validated at drawtime
3209    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3210        result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3211
3212    return result;
3213}
3214
3215// Validate HW line width capabilities prior to setting requested line width.
3216static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3217    bool skip_call = false;
3218
3219    // First check to see if the physical device supports wide lines.
3220    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
3221        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3222                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
3223                                            "not supported/enabled so lineWidth must be 1.0f!",
3224                             lineWidth);
3225    } else {
3226        // Otherwise, make sure the width falls in the valid range.
3227        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3228            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3229            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3230                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
3231                                                          "to between [%f, %f]!",
3232                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3233                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3234        }
3235    }
3236
3237    return skip_call;
3238}
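// Example of the two failure modes above (the values are illustrative): with wideLines not
// enabled, vkCmdSetLineWidth(cb, 2.0f) is rejected outright; with wideLines enabled and
// limits.lineWidthRange == [1.0, 8.0], a request for 16.0f fails the range check instead.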
3239
3240// Verify that create state for a pipeline is valid
3241static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3242                                      int pipelineIndex) {
3243    bool skip_call = false;
3244
3245    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3246
3247    // If create derivative bit is set, check that we've specified a base
3248    // pipeline correctly, and that the base pipeline was created to allow
3249    // derivatives.
3250    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3251        PIPELINE_NODE *pBasePipeline = nullptr;
3252        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3253              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3254            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3255                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3256                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3257        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3258            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3259                skip_call |=
3260                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3261                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3262                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3263            } else {
3264                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3265            }
3266        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3267            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3268        }
3269
3270        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3271            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3272                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3273                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3274        }
3275    }
3276
3277    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3278        if (!my_data->phys_dev_properties.features.independentBlend) {
3279            if (pPipeline->attachments.size() > 1) {
3280                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3281                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3282                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3283                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3284                    // only attachment state, so memcmp is best suited for the comparison
3285                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3286                               sizeof(pAttachments[0]))) {
3287                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3288                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3289                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
3290                                             "enabled, all elements of pAttachments must be identical");
3291                        break;
3292                    }
3293                }
3294            }
3295        }
3296        if (!my_data->phys_dev_properties.features.logicOp &&
3297            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3298            skip_call |=
3299                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3300                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
        }
        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
        }
    }

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
    if (renderPass &&
        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                            "is out of range for this renderpass (0..%u)",
                             pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
                                                    my_data->shaderModuleMap)) {
        skip_call = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip_call |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                            "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                       "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo State: "
                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                 "topology used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            // NOTE: 32 is the minimum value the spec guarantees for
            // VkPhysicalDeviceLimits::maxTessellationPatchSize; ideally this check
            // would compare against the actual device limit, which may be higher.
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                                "topology used with patchControlPoints value %u."
                                                                                " patchControlPoints should be >0 and <=32.",
                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                            "and scissors are dynamic PSO must include "
                                                                            "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                "vkCmdSetViewport().",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                         "vkCmdSetScissor().",
                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }

        // If rasterization is not disabled, and subpass uses a depth/stencil
        // attachment, pDepthStencilState must be a pointer to a valid structure.
        // Guard the subpass index here: an out-of-range value was only flagged above,
        // so without this guard we would index past the end of pSubpasses.
        auto subpass_desc = (renderPass && pPipeline->graphicsPipelineCI.subpass < renderPass->pCreateInfo->subpassCount)
                                ? &renderPass->pCreateInfo->pSubpasses[pPipeline->graphicsPipelineCI.subpass]
                                : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment");
            }
        }
    }
    return skip_call;
}
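
// Illustrative sketch (not part of the layer; variable names are hypothetical):
// application-side state that satisfies the viewport/scissor rules validated
// above. Even when viewport and scissor are dynamic, the counts must be
// non-zero and must match; the data pointers may then be left null and the
// values supplied later with vkCmdSetViewport()/vkCmdSetScissor().
//
//     VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
//     VkPipelineDynamicStateCreateInfo dyn = {};
//     dyn.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dyn.dynamicStateCount = 2;
//     dyn.pDynamicStates = dyn_states;
//
//     VkPipelineViewportStateCreateInfo vp = {};
//     vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
//     vp.viewportCount = 1;    // counts still required when state is dynamic
//     vp.scissorCount = 1;     // and scissorCount must match viewportCount
//     vp.pViewports = nullptr; // legal only because viewport is dynamic
//     vp.pScissors = nullptr;  // legal only because scissor is dynamic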

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// The following block of code is dedicated to managing/tracking descriptor sets (DSs)

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Return false if update struct is of valid type, otherwise flag error and return code from callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
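
// Worked example of the index arithmetic above (hypothetical values): for a
// binding whose descriptors start at overall layout index 4, an update with
// arrayIndex (VkWriteDescriptorSet::dstArrayElement) = 2 and descriptorCount = 3
// touches overall indices getUpdateStartIndex() = 4 + 2 = 6 through
// getUpdateEndIndex() = 4 + 2 + 3 - 1 = 8, inclusive.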
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}
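
// Illustrative sketch (handle names hypothetical): querying the tracked layout
// of one subresource of a depth/stencil image through the overload above. The
// per-aspect FindLayout() probes try COLOR, DEPTH, STENCIL, and METADATA in
// turn; for a combined depth/stencil format it is the DEPTH and/or STENCIL
// probe that hits.
//
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_DEPTH_BIT, 0 /*mipLevel*/, 0 /*arrayLayer*/};
//     VkImageLayout layout;
//     if (FindLayout(my_data, image, sub, layout)) {
//         // layout now holds the layout tracked for that subresource
//     }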

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto img_node = getImageNode(my_data, image);
    if (!img_node)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    // Pass the pair itself; the original call passed an extra 'image' argument for
    // which no overload exists (the pair already carries the image handle).
    SetLayout(pObject, imgpair, layout);
}
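
// Illustrative sketch (handle names hypothetical): the templated overloads
// above fan a single call out across the four aspect bits, so a caller only
// writes the following, and only the aspect(s) actually present in
// sub.aspectMask get recorded:
//
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0};
//     SetLayout(pCB, image, sub, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);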

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    const VkImage &image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
    bool skip_call = false;
    auto set_node = my_data->setMap.find(set);
    if (set_node == my_data->setMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        if (set_node->second->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
                                 func_str.c_str(), (uint64_t)(set));
        }
    }
    return skip_call;
}

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
        // Also delete the pool node itself so it isn't leaked when the map is
        // cleared (assumes pool nodes are heap-allocated at creation, mirroring
        // deletePipelines()/deleteCommandBuffers()).
        delete (*ii).second;
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    if (!pPool)
        return; // Unknown pool; getPoolNode() can return NULL, so guard before dereferencing
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}
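
// Illustrative sketch (handle names hypothetical): the bookkeeping above
// mirrors what vkResetDescriptorPool() means at the API level -- every set
// allocated from the pool becomes invalid and the pool's full capacity is
// available again:
//
//     vkResetDescriptorPool(device, pool, 0); // flags are reserved, must be 0
//     // Any VkDescriptorSet previously allocated from 'pool' must no longer be used.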

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // init cmd node and append to end of cmd LL
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skip_call;
}
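
// Illustrative sketch (handle names hypothetical): how the capability checks
// above fire. Recording a draw into a command buffer whose pool was created
// against a compute-only queue family routes through checkGraphicsBit() and
// produces a DRAWSTATE_INVALID_COMMAND_BUFFER error:
//
//     // 'cmd_buf' allocated from a pool whose queueFamilyIndex names a family
//     // with VK_QUEUE_COMPUTE_BIT but not VK_QUEUE_GRAPHICS_BIT
//     vkCmdDraw(cmd_buf, 3, 1, 0, 0); // flagged: CMD_DRAW requires graphics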
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageNode(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}
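
// Illustrative sketch (handle name hypothetical; assumes VK_OBJECT's
// {handle, type} field order): wrap a handle in a VK_OBJECT and fetch the
// common BASE_NODE so generic code can walk cb_bindings without knowing the
// concrete state-struct type:
//
//     VK_OBJECT obj = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
//     BASE_NODE *node = GetStateStructPtrFromObject(dev_data, obj);
//     if (node) {
//         for (auto cb : node->cb_bindings) { /* inspect or invalidate bound CBs */ }
//     }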

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_node = getFramebuffer(dev_data, framebuffer);
            if (fb_node)
                fb_node->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}
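
// Illustrative sketch (handle names hypothetical): both explicit and implicit
// command-buffer resets funnel through resetCB() above:
//
//     vkResetCommandBuffer(cmd_buf, 0);           // explicit reset
//     vkBeginCommandBuffer(cmd_buf, &begin_info); // implicit reset of a previously
//         // recorded CB (the pool must have been created with
//         // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)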

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
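
// Illustrative sketch of the mask logic above: if a pipeline lists only
// VK_DYNAMIC_STATE_LINE_WIDTH as dynamic, binding it marks every status bit
// except CBSTATUS_LINE_WIDTH_SET; that one is set later by vkCmdSetLineWidth().
// Conceptually:
//
//     CBStatusFlags mask = CBSTATUS_ALL & ~CBSTATUS_LINE_WIDTH_SET;
//     pCB->status |= mask; // line width remains outstanding until vkCmdSetLineWidth()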

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
        if (pPipeTrav) { // Nothing to print if no pipeline is bound
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                 vk_print_vkgraphicspipelinecreateinfo(
                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                     .c_str());
        }
    }
    return skip_call;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
        const vector<CMD_NODE> &cmds = pCB->cmds; // Avoid copying the command list just to print it
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    }
    // Nothing to print otherwise
}

static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skip_call;
    }
    skip_call |= printPipeline(my_data, cb);
    return skip_call;
}

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}
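
// Illustrative sketch (handle names hypothetical) of the calls these two
// helpers police -- transfers must sit outside a render pass, draws inside one:
//
//     vkCmdCopyBuffer(cmd_buf, src, dst, 1, &region); // OK: outside render pass
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdDraw(cmd_buf, 3, 1, 0, 0);                 // OK: inside render pass
//     vkCmdEndRenderPass(cmd_buf);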

static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data =
        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                     pCreateInfo->ppEnabledExtensionNames);
    init_core_validation(instance_data, pAllocator);

    instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (!my_data->logging_callback.empty()) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;
    dev_data->device_extensions.wsi_display_swapchain_enabled = false;

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
    }
}

// Verify that queue family has been properly requested
bool ValidateRequestedQueueFamilyProperties(layer_data *dev_data, const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    // First check whether the app has actually requested queueFamilyProperties
    if (!dev_data->physical_device_state) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
            if (dev_data->queue_family_properties.size() <=
                requestedIndex) { // requested index is out of bounds for this physical device
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (create_info->pQueueCreateInfos[i].queueCount >
                       dev_data->queue_family_properties[requestedIndex]->queueCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
                            "requested queueCount is %u.",
                            requestedIndex, dev_data->queue_family_properties[requestedIndex]->queueCount,
                            create_info->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    return skip_call;
}
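
// Illustrative sketch (handle names hypothetical): the query sequence the
// checks above expect an application to perform before vkCreateDevice():
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
//     // Each VkDeviceQueueCreateInfo must then use a queueFamilyIndex < count
//     // and a queueCount <= props[queueFamilyIndex].queueCount.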

// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(layer_data *dev_data, const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    VkBool32 *actual = reinterpret_cast<VkBool32 *>(&(dev_data->physical_device_features));
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues.
    //  Need to provide the struct member name with the issue. To do that we'll likely have to loop
    //  through each struct member, which should be done with codegen to keep it in sync.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add index to struct member name helper to be able to include a feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                "which is not available on this device.",
                i);
            errors++;
        }
    }
    if (errors && (UNCALLED == dev_data->physical_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. You should first query feature "
                                   "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}
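
// Note (illustrative): the loop above treats VkPhysicalDeviceFeatures as a flat
// array of VkBool32 members, which holds for the 1.0 struct since every field is
// a VkBool32. A sketch of a compile-time guard for that assumption:
//     static_assert(sizeof(VkPhysicalDeviceFeatures) % sizeof(VkBool32) == 0,
//                   "VkPhysicalDeviceFeatures must be a packed array of VkBool32");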

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    bool skip_call = false;

    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        skip_call |= ValidateRequestedFeatures(my_instance_data, pCreateInfo->pEnabledFeatures);
    }
    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device mem limits into device layer_data struct
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}
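
// Layer-chaining recap (informal): CreateDevice() above follows the standard
// loader pattern -- resolve the next layer's vkCreateDevice through the
// pLayerInfo chain, advance chain_info->u.pLayerInfo before calling down, then
// build this layer's dispatch table from fpGetDeviceProcAddr. Every other device
// entrypoint in this file dispatches through that table.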

// prototype
static void deleteRenderPasses(layer_data *);
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    lock.unlock();
#if MTMERGESOURCE
    bool skip_call = false;
    lock.lock();
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data);
    printCBList(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = (*ii).second.get();
            if (pInfo->alloc_info.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
                            "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
                            "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (!skip_call) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}
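
// Illustrative only: the leak check in DestroyDevice() above fires for any
// VkDeviceMemory still alive at destroy time, e.g. an app that calls
//     vkAllocateMemory(device, &alloc_info, nullptr, &mem);
//     vkDestroyDevice(device, nullptr);   // MEMTRACK_MEMORY_LEAK reported here
// without an intervening vkFreeMemory(device, mem, nullptr).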

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// This validates that the initial layout specified in the command buffer for the IMAGE
// is the same as the global IMAGE layout.
static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}
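
// Illustrative only: ValidateCmdBufImageLayouts() catches submissions whose first
// recorded use of an image disagrees with the tracked global layout, e.g. a
// command buffer recording a vkCmdPipelineBarrier() whose
// VkImageMemoryBarrier::oldLayout is VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL while
// the image is still in VK_IMAGE_LAYOUT_UNDEFINED at vkQueueSubmit() time.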

// Loop through bound objects and increment their in_use counts
//  For any unknown objects, flag an error
static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    bool skip = false;
    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
            base_obj = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
            error_code = DRAWSTATE_INVALID_SAMPLER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_QUERY_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
            base_obj = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
            error_code = DRAWSTATE_INVALID_PIPELINE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            base_obj = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
            error_code = DRAWSTATE_INVALID_EVENT;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
            base_obj = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
            base_obj = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
            base_obj = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
            error_code = DRAWSTATE_INVALID_RENDERPASS;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
            break;
        }
        default:
            // TODO : Merge handling of other object types into this code
            break;
        }
        if (!base_obj) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
        } else {
            base_obj->in_use.fetch_add(1);
        }
    }
    return skip;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip_call = false;

    cb_node->in_use.fetch_add(1);
    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);

    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_node = getBufferNode(dev_data, buffer);
            if (!buffer_node) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_node->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_node = getEventNode(dev_data, event);
        if (event_node)
            event_node->write_in_use++;
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling thread.
// TODO: untangle this.
static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (pCB) {
        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
            for (auto event : queryEventsPair.second) {
                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64
                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                }
            }
        }
    }
    return skip_call;
}

// TODO: nuke this completely.
// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}
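
// Note: DecrementBoundResources() mirrors ValidateAndIncrementBoundObjects()
// above -- every in_use.fetch_add(1) performed at submit time must be matched by
// exactly one in_use.fetch_sub(1) when the submission retires in
// RetireWorkOnQueue(); an unbalanced count would make ValidateObjectNotInUse()
// report spurious in-use errors at destroy time.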

static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
{
    bool skip_call = false; // TODO: extract everything that might fail to precheck
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto & submission = pQueue->submissions.front();

        for (auto & wait : submission.waitSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
            pSemaphore->in_use.fetch_sub(1);
            auto & lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto & semaphore : submission.signalSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            pSemaphore->in_use.fetch_sub(1);
        }

        for (auto cb : submission.cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_node = getBufferNode(dev_data, buffer);
                    if (buffer_node) {
                        buffer_node->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
            removeInFlightCmdBuffer(dev_data, cb);
        }

        auto pFence = getFenceNode(dev_data, submission.fence);
        if (pFence) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
    }

    return skip_call;
}
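
// Sequence-number model (informal sketch): each QUEUE_NODE carries a monotonically
// increasing 'seq', and the i-th entry of pQueue->submissions corresponds to
// sequence number pQueue->seq + i + 1. RetireWorkOnQueue(dev_data, q, n) pops and
// retires submissions until q->seq reaches n; cross-queue semaphore waits recorded
// in SEMAPHORE_WAIT then recursively retire the signaling queue up to the seq that
// was current when the signal was enqueued.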


// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void
SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
{
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
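
// E.g. (hypothetical numbers): with three submissions already queued on a queue
// whose seq is 0, SubmitFence(pQueue, pFence, 2) predicts the fence signals at
// seq 0 + 3 + 2 == 5; RetireFence() later uses this (queue, seq) pair to retire
// everything up to that point.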

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             0, __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                             "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                             "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                             (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            for (auto obj : pCB->broken_bindings) {
                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
                const char *cause_str =
                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";

                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
                            " was %s.",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skip_call;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_node = getQueueNode(dev_data, queue);

    if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
            "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
            " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
            reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
            reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
    }

    return skip_call;
}

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip_call = false;

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);

    skip_call |= validateAndIncrementResources(dev_data, pCB);

    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                        " but that buffer has subsequently been bound to "
                        "primary cmd buffer 0x%" PRIxLEAST64
                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }

    skip_call |= validateCommandBufferState(dev_data, pCB);

    return skip_call;
}

static bool
ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
{
    bool skip_call = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                        reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skip_call;
}


VKAPI_ATTR VkResult VKAPI_CALL
QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueNode(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // TODO : Review these old print functions and clean up as appropriate
    print_mem_list(dev_data);
    printCBList(dev_data);

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
    }

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<SEMAPHORE_WAIT> semaphore_waits;
        vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        std::vector<VkCommandBuffer> cbs;

        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
            if (pCBNode) {
                cbs.push_back(submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
                    cbs.push_back(secondaryCmdBuffer);
                }

                pCBNode->submitCount++; // increment submit count
                skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode);
                skip_call |= validateQueueFamilyIndices(dev_data, pCBNode, queue);
                // Potential early exit here as bad object state may crash in delayed function calls
                if (skip_call)
                    return result;
                // Call submit-time functions to validate/update state
                for (auto &function : pCBNode->validate_functions) {
                    skip_call |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skip_call |= function(queue);
                }
                for (auto &function : pCBNode->queryUpdates) {
                    skip_call |= function(queue);
                }
            }
        }

        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !submitCount) {
        // If no submissions, but just dropping a fence on the end of the queue,
        // record an empty submission with just the fence, so we can determine
        // its completion.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
                                         std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(),
                                         fence);
    }

    lock.unlock();
    if (!skip_call)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}
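
// Semaphore bookkeeping recap (informal): QueueSubmit() above enforces the
// one-signal/one-wait protocol -- waiting on a semaphore that is unsignaled with
// no pending signaler, or signaling one that is already signaled, raises
// DRAWSTATE_QUEUE_FORWARD_PROGRESS. The (queue, seq) pair stored in
// pSemaphore->signaler is what lets RetireWorkOnQueue() credit a wait back to the
// signaling queue.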

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    std::lock_guard<std::mutex> lock(global_lock);
    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
    print_mem_list(my_data);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device—for example by command buffers queued for execution. The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = freeMemObjInfo(my_data, device, mem, false);
    print_mem_list(my_data);
    printCBList(my_data);
    lock.unlock();
    if (!skip_call) {
        my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
    }
}

// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
//  and that the size of the map range should be:
//  1. Not zero
//  2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip_call = false;

    if (size == 0) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_info->mem_range.size != 0) {
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip_call =
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
                            size + offset, mem_info->alloc_info.allocationSize);
            }
        }
    }
    return skip_call;
}
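
// Illustrative only: for an allocation with allocationSize == 256, each of these
// hypothetical app calls would be flagged by ValidateMapMemRange():
//     vkMapMemory(device, mem,   0,             0, 0, &ptr); // zero-sized range
//     vkMapMemory(device, mem, 192,           128, 0, &ptr); // 192 + 128 > 256
//     vkMapMemory(device, mem, 256, VK_WHOLE_SIZE, 0, &ptr); // offset at/past end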

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->mem_range.offset = offset;
        mem_info->mem_range.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skip_call = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->mem_range.size) {
            // Valid Usage: memory must currently be mapped
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
        mem_info->mem_range.size = 0;
        if (mem_info->shadow_copy) {
            free(mem_info->shadow_copy_base);
            mem_info->shadow_copy_base = 0;
            mem_info->shadow_copy = 0;
        }
    }
    return skip_call;
}

// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;

static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                     void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
            assert(vk_safe_modulo(mem_info->shadow_pad_size,
                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
            mem_info->shadow_copy_base = malloc(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset);

            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) + start_offset;
            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                                  map_alignment) == 0);

            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, 2 * mem_info->shadow_pad_size + size);
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}
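
// Shadow-copy layout (informal sketch): for non-coherent memory, the pointer
// returned to the app points into an oversized allocation so that writes outside
// [offset, offset + size) land in guard bands pre-filled with
// NoncoherentMemoryFillValue:
//     shadow_copy_base ... | pad (shadow_pad_size) | user data (size) | pad |
// The alignment math above keeps (returned pointer - offset) aligned to
// minMemoryMapAlignment, matching the guarantee the spec makes for the real
// mapping.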

// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
//  it should have been submitted on a queue or during acquire next image
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skip_call = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                 "acquire next image.",
                                 apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skip_call;
}

static bool RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = getFenceNode(dev_data, fence);
    if (pFence->signaler.first != VK_NULL_HANDLE) {
        /* Fence signaller is a queue -- use this as proof that prior operations
         * on that queue have completed.
         */
        return RetireWorkOnQueue(dev_data,
                                 getQueueNode(dev_data, pFence->signaler.first),
                                 pFence->signaler.second);
    } else {
        /* Fence signaller is the WSI. We're not tracking what the WSI op
         * actually /was/ in CV yet, but we need to mark the fence as retired.
         */
        pFence->state = FENCE_RETIRED;
        return false;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            for (uint32_t i = 0; i < fenceCount; i++) {
                skip_call |= RetireFence(dev_data, pFences[i]);
            }
        }
        // NOTE : An alternate case not handled here is when only some fences have completed. In
        //  that case, for the app to determine which fences completed, it will have to call
        //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
        lock.unlock();
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        skip_call |= RetireFence(dev_data, fence);
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                          VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->queue = *pQueue;
        pQNode->queueFamilyIndex = queueFamilyIndex;
        pQNode->seq = 0;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pQueue = getQueueNode(dev_data, queue);
    skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto & queue : dev_data->queueMap) {
        skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
                                 (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    lock.unlock();

    if (!skip_call)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

// For given obj node, if it is in use, flag a validation error and return callback result, else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct) {
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
                        DRAWSTATE_OBJECT_INUSE, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer.",
                        object_type_to_string(obj_struct.type), obj_struct.handle);
    }
    return skip;
}
5355
5356VKAPI_ATTR void VKAPI_CALL
5357DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5358    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5359    bool skip = false;
5360    std::unique_lock<std::mutex> lock(global_lock);
5361    auto sema_node = getSemaphoreNode(dev_data, semaphore);
5362    if (sema_node) {
5363        skip |= ValidateObjectNotInUse(dev_data, sema_node,
5364                                       {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT});
5365    }
5366    if (!skip) {
5367        dev_data->semaphoreMap.erase(semaphore);
5368        lock.unlock();
5369        dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5370    }
5371}
5372
5373VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5374    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5375    bool skip = false;
5376    std::unique_lock<std::mutex> lock(global_lock);
5377    auto event_node = getEventNode(dev_data, event);
5378    if (event_node) {
5379        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5380        skip |= ValidateObjectNotInUse(dev_data, event_node, obj_struct);
5381        // Any bound cmd buffers are now invalid
5382        invalidateCommandBuffers(event_node->cb_bindings, obj_struct);
5383    }
5384    if (!skip) {
5385        dev_data->eventMap.erase(event);
5386        lock.unlock();
5387        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5388    }
5389}
5390
5391VKAPI_ATTR void VKAPI_CALL
5392DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5393    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5394    bool skip = false;
5395    std::unique_lock<std::mutex> lock(global_lock);
5396    auto qp_node = getQueryPoolNode(dev_data, queryPool);
5397    if (qp_node) {
5398        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5399        skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct);
5400        // Any bound cmd buffers are now invalid
5401        invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
5402    }
5403    if (!skip) {
5404        dev_data->queryPoolMap.erase(queryPool);
5405        lock.unlock();
5406        dev_data->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5407    }
5408}
5409
5410VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5411                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5412                                                   VkQueryResultFlags flags) {
5413    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5414    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5415    std::unique_lock<std::mutex> lock(global_lock);
5416    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5417        auto pCB = getCBNode(dev_data, cmdBuffer);
5418        for (auto queryStatePair : pCB->queryToStateMap) {
5419            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5420        }
5421    }
5422    bool skip_call = false;
5423    for (uint32_t i = 0; i < queryCount; ++i) {
5424        QueryObject query = {queryPool, firstQuery + i};
5425        auto queryElement = queriesInFlight.find(query);
5426        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5427        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5428            // Available and in flight
5429            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
5431                for (auto cmdBuffer : queryElement->second) {
5432                    auto pCB = getCBNode(dev_data, cmdBuffer);
5433                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5434                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5435                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5436                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5437                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5438                                             (uint64_t)(queryPool), firstQuery + i);
5439                    } else {
5440                        for (auto event : queryEventElement->second) {
5441                            dev_data->eventMap[event].needsSignaled = true;
5442                        }
5443                    }
5444                }
5445                // Unavailable and in flight
5446            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
5448                // TODO : Can there be the same query in use by multiple command buffers in flight?
5449                bool make_available = false;
5450                for (auto cmdBuffer : queryElement->second) {
5451                    auto pCB = getCBNode(dev_data, cmdBuffer);
5452                    make_available |= pCB->queryToStateMap[query];
5453                }
5454                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5455                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5456                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5457                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5458                                         (uint64_t)(queryPool), firstQuery + i);
5459                }
5460                // Unavailable and not in flight
5461            } else if (!queryToStateElement->second) {
5462                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5463                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5464                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5465                                     (uint64_t)(queryPool), firstQuery + i);
5466            }
5467        } else {
5468            // Uninitialized: no results have ever been collected for this query index
5469            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5470                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5471                                 "Cannot get query results on queryPool 0x%" PRIx64
5472                                 " with index %d as data has not been collected for this index.",
5473                                 (uint64_t)(queryPool), firstQuery + i);
5474        }
5475    }
5476    lock.unlock();
5477    if (skip_call)
5478        return VK_ERROR_VALIDATION_FAILED_EXT;
5479    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5480                                                                flags);
5481}
5482
5483static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5484    bool skip_call = false;
5485    auto buffer_node = getBufferNode(my_data, buffer);
5486    if (!buffer_node) {
5487        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5488                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5489                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5490    } else {
5491        if (buffer_node->in_use.load()) {
5492            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5493                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5494                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5495        }
5496    }
5497    return skip_call;
5498}
5499
5500// Return true if given ranges intersect, else false
5501// Prereq : For both ranges, range->end - range->start > 0. A violation of that should already have been
5502//  reported as an error, so it is not re-checked here
5503// When one range is linear and the other is non-linear, the comparison is padded out to bufferImageGranularity
5504// In that padded case an alias is a validation error: it is reported and skip_call may be set by the debug
5505//  callback, so callers that can hit the padded case should merge the returned skip_call value into their own.
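// Illustrative example, assuming a bufferImageGranularity of 0x400: a linear range ending at 0x5ff and a
// non-linear range starting at 0x600 both round down to 0x400 (0x5ff & ~0x3ff == 0x600 & ~0x3ff == 0x400),
// so the masked comparisons below treat them as intersecting and the aliasing error fires.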
5506static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5507    *skip_call = false;
5508    auto r1_start = range1->start;
5509    auto r1_end = range1->end;
5510    auto r2_start = range2->start;
5511    auto r2_end = range2->end;
5512    VkDeviceSize pad_align = 1;
5513    if (range1->linear != range2->linear) {
5514        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5515    }
5516    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5517        return false;
5518    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5519        return false;
5520
5521    if (range1->linear != range2->linear) {
5522        // In linear vs. non-linear case, it's an error to alias
5523        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5524        const char *r1_type_str = range1->image ? "image" : "buffer";
5525        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5526        const char *r2_type_str = range2->image ? "image" : "buffer";
5527        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5528        *skip_call |=
5529            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5530                    "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5531                           " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5532                    r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5533    }
5534    // Ranges intersect
5535    return true;
5536}
5537// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5538static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5539    // Create a local MEMORY_RANGE struct to wrap offset/size
5540    MEMORY_RANGE range_wrap;
5541    // Match range1's linear flag so no padding is applied and no spurious aliasing error can be raised
5542    range_wrap.linear = range1->linear;
5543    range_wrap.start = offset;
5544    range_wrap.end = end;
5545    bool tmp_bool;
5546    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5547}
5548// For given mem_info, mark as valid all bound ranges that intersect the [offset, end] range
5549// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5550static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5551    bool tmp_bool = false;
5552    MEMORY_RANGE map_range;
5553    map_range.linear = true;
5554    map_range.start = offset;
5555    map_range.end = end;
5556    for (auto &handle_range_pair : mem_info->bound_ranges) {
5557        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5558            // TODO : WARN here if tmp_bool true?
5559            handle_range_pair.second.valid = true;
5560        }
5561    }
5562}
5563// Object with given handle is being bound to memory w/ given mem_info struct.
5564//  Track the newly bound memory range with given memoryOffset
5565//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5566//  and non-linear range incorrectly overlap.
5567// Return true if an error is flagged and the user callback returns "true", otherwise false
5568// is_image indicates an image object, otherwise handle is for a buffer
5569// is_linear indicates a buffer or linear image
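// Example, with assumed values: binding at memoryOffset = 0x2000 with memRequirements.size = 0x1000 records
// start = 0x2000 and end = 0x2fff; a later linear binding starting at 0x3000 does not alias this range,
// while one starting at 0x2800 does, and the two ranges are cross-linked via their aliases sets below.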
5570static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5571                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5572    bool skip_call = false;
5573    MEMORY_RANGE range;
5574
5575    range.image = is_image;
5576    range.handle = handle;
5577    range.linear = is_linear;
5578    range.valid = mem_info->global_valid;
5579    range.memory = mem_info->mem;
5580    range.start = memoryOffset;
5581    range.size = memRequirements.size;
5582    range.end = memoryOffset + memRequirements.size - 1;
5583    range.aliases.clear();
5584    // Update Memory aliasing
5585    // Save alias ranges so we can copy them into the final map entry below. We can't do that inside the loop because we
5586    // don't yet have the final pointer; inserting into the map before the loop would also make the new range compare against itself
5587    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5588    for (auto &obj_range_pair : mem_info->bound_ranges) {
5589        auto check_range = &obj_range_pair.second;
5590        bool intersection_error = false;
5591        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5592            skip_call |= intersection_error;
5593            range.aliases.insert(check_range);
5594            tmp_alias_ranges.insert(check_range);
5595        }
5596    }
5597    mem_info->bound_ranges[handle] = std::move(range);
5598    for (auto tmp_range : tmp_alias_ranges) {
5599        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5600    }
5601    if (is_image)
5602        mem_info->bound_images.insert(handle);
5603    else
5604        mem_info->bound_buffers.insert(handle);
5605
5606    return skip_call;
5607}
5608
5609static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5610                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5611    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5612}
5613
5614static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5615                                    VkMemoryRequirements mem_reqs) {
5616    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5617}
5618
5619// Remove the MEMORY_RANGE struct for the given handle from mem_info's bound_ranges
5620//  is_image indicates if the handle is for an image or a buffer
5621//  This function also removes the handle from the appropriate bound_images/bound_buffers
5622//  set and cleans up any aliases of the range being removed.
5623static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5624    auto erase_range = &mem_info->bound_ranges[handle];
5625    for (auto alias_range : erase_range->aliases) {
5626        alias_range->aliases.erase(erase_range);
5627    }
5628    erase_range->aliases.clear();
5629    mem_info->bound_ranges.erase(handle);
5630    if (is_image)
5631        mem_info->bound_images.erase(handle);
5632    else
5633        mem_info->bound_buffers.erase(handle);
5634}
5635
5636static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5637
5638static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5639
5640VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5641                                         const VkAllocationCallbacks *pAllocator) {
5642    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5643    std::unique_lock<std::mutex> lock(global_lock);
5644    if (!validateIdleBuffer(dev_data, buffer)) {
5645        // Clean up memory binding and range information for buffer
5646        auto buff_node = getBufferNode(dev_data, buffer);
5647        if (buff_node) {
5648            // Any bound cmd buffers are now invalid
5649            invalidateCommandBuffers(buff_node->cb_bindings,
5650                                     {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5651            auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
5652            if (mem_info) {
5653                RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5654            }
5655            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5656            dev_data->bufferMap.erase(buff_node->buffer);
5657        }
5658        lock.unlock();
5659        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5660    }
5661}
5662
5663VKAPI_ATTR void VKAPI_CALL
5664DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5665    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5666
5667    std::unique_lock<std::mutex> lock(global_lock);
5668    auto view_state = getBufferViewState(dev_data, bufferView);
5669    if (view_state) {
5670        dev_data->bufferViewMap.erase(bufferView);
5671    }
5672    lock.unlock();
5673    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5674}
5675
5676VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5677    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5678
5679    std::unique_lock<std::mutex> lock(global_lock);
5680    auto img_node = getImageNode(dev_data, image);
5681    if (img_node) {
5682        // Any bound cmd buffers are now invalid
5683        invalidateCommandBuffers(img_node->cb_bindings,
5684                                 {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
5685        // Clean up memory mapping, bindings and range references for image
5686        auto mem_info = getMemObjInfo(dev_data, img_node->mem);
5687        if (mem_info) {
5688            RemoveImageMemoryRange(reinterpret_cast<uint64_t &>(image), mem_info);
5689            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5690        }
5691        // Remove image from imageMap
5692        dev_data->imageMap.erase(img_node->image);
5693    }
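    // imageSubresourceMap lists every subresource key tracked for this image, and each such key also owns
    // an entry in imageLayoutMap, so both maps must be scrubbed before the handle is destroyed.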
5694    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5695    if (subEntry != dev_data->imageSubresourceMap.end()) {
5696        for (const auto& pair : subEntry->second) {
5697            dev_data->imageLayoutMap.erase(pair);
5698        }
5699        dev_data->imageSubresourceMap.erase(subEntry);
5700    }
5701    lock.unlock();
5702    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5703}
5704
5705static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5706                                  const char *funcName) {
5707    bool skip_call = false;
5708    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5709        skip_call = log_msg(
5710            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5711            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5712            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5713            "type (0x%X) of this memory object 0x%" PRIx64 ".",
5714            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5715    }
5716    return skip_call;
5717}
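// Worked example of the check above, with assumed values: memoryTypeIndex = 2 yields (1 << 2) = 0b0100;
// if the object's memoryTypeBits is 0b1011, the AND is 0 and the incompatible-type error is reported.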
5718
5719VKAPI_ATTR VkResult VKAPI_CALL
5720BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5721    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5722    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5723    std::unique_lock<std::mutex> lock(global_lock);
5724    // Track objects tied to memory
5725    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5726    bool skip_call = set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5727    auto buffer_node = getBufferNode(dev_data, buffer);
5728    if (buffer_node) {
5729        VkMemoryRequirements memRequirements;
5730        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5731        buffer_node->mem = mem;
5732        buffer_node->memOffset = memoryOffset;
5733        buffer_node->memSize = memRequirements.size;
5734
5735        // Track and validate bound memory range information
5736        auto mem_info = getMemObjInfo(dev_data, mem);
5737        if (mem_info) {
5738            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5739            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5740        }
5741
5742        // Validate memory requirements alignment
5743        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5744            skip_call |=
5745                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5746                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5747                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5748                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5749                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5750                        memoryOffset, memRequirements.alignment);
5751        }
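        // e.g., assuming an alignment of 0x100: memoryOffset = 0x180 is rejected above because
        // vk_safe_modulo(0x180, 0x100) == 0x80, which is non-zero.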
5752
5753        // Validate device limits alignments
5754        static const VkBufferUsageFlagBits usage_list[3] = {
5755            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5756            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5757            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5758        static const char *memory_type[3] = {"texel",
5759                                             "uniform",
5760                                             "storage"};
5761        static const char *offset_name[3] = {
5762            "minTexelBufferOffsetAlignment",
5763            "minUniformBufferOffsetAlignment",
5764            "minStorageBufferOffsetAlignment"
5765        };
5766
5767        // Keep offset_requirement below in sync with the usage_list/memory_type/offset_name arrays above
5768        const VkDeviceSize offset_requirement[3] = {
5769            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5770            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5771            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5772        };
5773        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
5774
5775        for (int i = 0; i < 3; i++) {
5776            if (usage & usage_list[i]) {
5777                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5778                    skip_call |=
5779                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5780                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5781                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5782                                "device limit %s 0x%" PRIxLEAST64,
5783                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5784                }
5785            }
5786        }
5787    }
5788    print_mem_list(dev_data);
5789    lock.unlock();
5790    if (!skip_call) {
5791        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5792    }
5793    return result;
5794}
5795
5796VKAPI_ATTR void VKAPI_CALL
5797GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5798    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5799    // TODO : What to track here?
5800    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5801    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5802}
5803
5804VKAPI_ATTR void VKAPI_CALL
5805GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5806    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5807    // TODO : What to track here?
5808    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5809    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5810}
5811
5812VKAPI_ATTR void VKAPI_CALL
5813DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5814    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5815    bool skip = false;
5816    std::unique_lock<std::mutex> lock(global_lock);
5817    auto view_state = getImageViewState(dev_data, imageView);
5818    if (view_state) {
5819        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(imageView), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5820        skip |= ValidateObjectNotInUse(dev_data, view_state, obj_struct);
5821        // Any bound cmd buffers are now invalid
5822        invalidateCommandBuffers(view_state->cb_bindings, obj_struct);
5823    }
5824    if (!skip) {
5825        dev_data->imageViewMap.erase(imageView);
5826        lock.unlock();
5827        dev_data->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5828    }
5829}
5830
5831VKAPI_ATTR void VKAPI_CALL
5832DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5833    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5834
5835    std::unique_lock<std::mutex> lock(global_lock);
5836    my_data->shaderModuleMap.erase(shaderModule);
5837    lock.unlock();
5838
5839    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5840}
5841
5842VKAPI_ATTR void VKAPI_CALL
5843DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5844    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5845    bool skip = false;
5846    std::unique_lock<std::mutex> lock(global_lock);
5847    auto pipe_node = getPipeline(dev_data, pipeline);
5848    if (pipe_node) {
5849        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
5850        skip |= ValidateObjectNotInUse(dev_data, pipe_node, obj_struct);
5851        // Any bound cmd buffers are now invalid
5852        invalidateCommandBuffers(pipe_node->cb_bindings, obj_struct);
5853    }
5854    if (!skip) {
5855        dev_data->pipelineMap.erase(pipeline);
5856        lock.unlock();
5857        dev_data->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5858    }
5859}
5860
5861VKAPI_ATTR void VKAPI_CALL
5862DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5863    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5864    std::unique_lock<std::mutex> lock(global_lock);
5865    dev_data->pipelineLayoutMap.erase(pipelineLayout);
5866    lock.unlock();
5867
5868    dev_data->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5869}
5870
5871VKAPI_ATTR void VKAPI_CALL
5872DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5873    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5874    bool skip = false;
5875    std::unique_lock<std::mutex> lock(global_lock);
5876    auto sampler_node = getSamplerNode(dev_data, sampler);
5877    if (sampler_node) {
5878        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
5879        skip |= ValidateObjectNotInUse(dev_data, sampler_node, obj_struct);
5880        // Any bound cmd buffers are now invalid
5881        invalidateCommandBuffers(sampler_node->cb_bindings, obj_struct);
5882    }
5883    if (!skip) {
5884        dev_data->samplerMap.erase(sampler);
5885        lock.unlock();
5886        dev_data->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5887    }
5888}
5889
5890VKAPI_ATTR void VKAPI_CALL
5891DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5892    // TODO : Clean up any internal data structures using this obj.
5893    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5894        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5895}
5896
5897VKAPI_ATTR void VKAPI_CALL
5898DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5899    // TODO : Clean up any internal data structures using this obj.
5900    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5901        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5902}
5903// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5904//  A secondary command buffer is an error here only if its primary is also in-flight;
5905//  a secondary whose primary has completed is considered reusable
5906// This function is only valid at a point when cmdBuffer is being reset or freed
5907static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5908    bool skip_call = false;
5909    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5910        // Primary CB or secondary where primary is also in-flight is an error
5911        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5912            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5913            skip_call |= log_msg(
5914                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5915                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5916                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5917                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5918        }
5919    }
5920    return skip_call;
5921}
5922
5923// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5924static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
5925    bool skip_call = false;
5926    for (auto cmd_buffer : pPool->commandBuffers) {
5927        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5928            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5929        }
5930    }
5931    return skip_call;
5932}
5933
5934static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5935    for (auto cmd_buffer : pPool->commandBuffers) {
5936        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5937    }
5938}
5939
5940VKAPI_ATTR void VKAPI_CALL
5941FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5942    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5943    bool skip_call = false;
5944    std::unique_lock<std::mutex> lock(global_lock);
5945
5946    for (uint32_t i = 0; i < commandBufferCount; i++) {
5947        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5948        // Verify the command buffer is not in flight before it may be freed
5949        if (cb_node) {
5950            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
5951        }
5952    }
5953
5954    if (skip_call)
5955        return;
5956
5957    auto pPool = getCommandPoolNode(dev_data, commandPool);
5958    for (uint32_t i = 0; i < commandBufferCount; i++) {
5959        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5960        // Delete CB information structure, and remove from commandBufferMap
5961        if (cb_node) {
5962            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5963            // reset prior to delete for data clean-up
5964            resetCB(dev_data, cb_node->commandBuffer);
5965            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
5966            delete cb_node;
5967        }
5968
5969        // Remove commandBuffer reference from commandPoolMap
5970        pPool->commandBuffers.remove(pCommandBuffers[i]);
5971    }
5972    printCBList(dev_data);
5973    lock.unlock();
5974
5975    dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5976}
5977
5978VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5979                                                 const VkAllocationCallbacks *pAllocator,
5980                                                 VkCommandPool *pCommandPool) {
5981    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5982
5983    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5984
5985    if (VK_SUCCESS == result) {
5986        std::lock_guard<std::mutex> lock(global_lock);
5987        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5988        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5989    }
5990    return result;
5991}
5992
5993VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5994                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5996    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5997    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5998    if (result == VK_SUCCESS) {
5999        std::lock_guard<std::mutex> lock(global_lock);
6000        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
6001        qp_node->createInfo = *pCreateInfo;
6002    }
6003    return result;
6004}
6005
6006// Destroy commandPool along with all of the commandBuffers allocated from that pool
6007VKAPI_ATTR void VKAPI_CALL
6008DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6009    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6010    bool skip_call = false;
6011    std::unique_lock<std::mutex> lock(global_lock);
6012    // Verify that command buffers in pool are complete (not in-flight)
6013    auto pPool = getCommandPoolNode(dev_data, commandPool);
6014    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
6015
6016    if (skip_call)
6017        return;
6018    // Remove the cmdpool from the cmdpoolmap only after removing all of its cmdbuffers from the commandBufferMap
6019    clearCommandBuffersInFlight(dev_data, pPool);
6020    for (auto cb : pPool->commandBuffers) {
6021        clear_cmd_buf_and_mem_references(dev_data, cb);
6022        auto cb_node = getCBNode(dev_data, cb);
6023        // Remove references to this cb_node prior to delete
6024        // TODO : Need better solution here, resetCB?
6025        for (auto obj : cb_node->object_bindings) {
6026            removeCommandBufferBinding(dev_data, &obj, cb_node);
6027        }
6028        for (auto framebuffer : cb_node->framebuffers) {
6029            auto fb_node = getFramebuffer(dev_data, framebuffer);
6030            if (fb_node)
6031                fb_node->cb_bindings.erase(cb_node);
6032        }
6033        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6034        delete cb_node;                       // delete CB info structure
6035    }
6036    dev_data->commandPoolMap.erase(commandPool);
6037    lock.unlock();
6038
6039    dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
6040}
6041
6042VKAPI_ATTR VkResult VKAPI_CALL
6043ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6044    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6045    bool skip_call = false;
6046
6047    std::unique_lock<std::mutex> lock(global_lock);
6048    auto pPool = getCommandPoolNode(dev_data, commandPool);
6049    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
6050    lock.unlock();
6051
6052    if (skip_call)
6053        return VK_ERROR_VALIDATION_FAILED_EXT;
6054
6055    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6056
6057    // Reset all of the CBs allocated from this pool
6058    if (VK_SUCCESS == result) {
6059        lock.lock();
6060        clearCommandBuffersInFlight(dev_data, pPool);
6061        for (auto cmdBuffer : pPool->commandBuffers) {
6062            resetCB(dev_data, cmdBuffer);
6063        }
6064        lock.unlock();
6065    }
6066    return result;
6067}
6068
6069VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6070    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6071    bool skip_call = false;
6072    std::unique_lock<std::mutex> lock(global_lock);
6073    for (uint32_t i = 0; i < fenceCount; ++i) {
6074        auto pFence = getFenceNode(dev_data, pFences[i]);
6075        if (pFence && pFence->state == FENCE_INFLIGHT) {
6076            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6077                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6078                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
6079        }
6080    }
6081    lock.unlock();
6082
6083    if (skip_call)
6084        return VK_ERROR_VALIDATION_FAILED_EXT;
6085
6086    VkResult result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6087
6088    if (result == VK_SUCCESS) {
6089        lock.lock();
6090        for (uint32_t i = 0; i < fenceCount; ++i) {
6091            auto pFence = getFenceNode(dev_data, pFences[i]);
6092            if (pFence) {
6093                pFence->state = FENCE_UNSIGNALED;
6094            }
6095        }
6096        lock.unlock();
6097    }
6098
6099    return result;
6100}
6101
6102// For given cb_nodes, invalidate them and track object causing invalidation
6103void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
6104    for (auto cb_node : cb_nodes) {
6105        cb_node->state = CB_INVALID;
6106        cb_node->broken_bindings.push_back(obj);
6107    }
6108}
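// Example: destroying a sampler that a recorded command buffer still references drops that command buffer
// to CB_INVALID via this helper, so a later submission of it can be flagged instead of dispatched blindly.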
6109
6110VKAPI_ATTR void VKAPI_CALL
6111DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6112    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6113    std::unique_lock<std::mutex> lock(global_lock);
6114    auto fb_node = getFramebuffer(dev_data, framebuffer);
6115    if (fb_node) {
6116        invalidateCommandBuffers(fb_node->cb_bindings,
6117                                 {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT});
6118        dev_data->frameBufferMap.erase(fb_node->framebuffer);
6119    }
6120    lock.unlock();
6121    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6122}
6123
6124VKAPI_ATTR void VKAPI_CALL
6125DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6126    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6127    std::unique_lock<std::mutex> lock(global_lock);
6128    dev_data->renderPassMap.erase(renderPass);
6129    // TODO: leaking all the guts of the renderpass node here!
6130    lock.unlock();
6131    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6132}
6133
6134VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6135                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6136    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6137
6138    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6139
6140    if (VK_SUCCESS == result) {
6141        std::lock_guard<std::mutex> lock(global_lock);
6142        // TODO : This does not create a deep copy of pQueueFamilyIndices, so fix that if/when that data needs to remain valid
6143        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
6144    }
6145    return result;
6146}
6147
6148static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6149    bool skip_call = false;
6150    BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
6151    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6152    if (buf_node) {
6153        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
6154        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6155        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6156        skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
6157                                              VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
6158                                              false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6159    }
6160    return skip_call;
6161}
6162
6163VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6164                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6165    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6166    std::unique_lock<std::mutex> lock(global_lock);
6167    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6168    lock.unlock();
6169    if (skip_call)
6170        return VK_ERROR_VALIDATION_FAILED_EXT;
6171    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6172    if (VK_SUCCESS == result) {
6173        lock.lock();
6174        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
6175        lock.unlock();
6176    }
6177    return result;
6178}
6179
6180VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6181                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6182    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6183
6184    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6185
6186    if (VK_SUCCESS == result) {
6187        std::lock_guard<std::mutex> lock(global_lock);
6188        IMAGE_LAYOUT_NODE image_node;
6189        image_node.layout = pCreateInfo->initialLayout;
6190        image_node.format = pCreateInfo->format;
6191        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo))));
6192        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6193        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6194        dev_data->imageLayoutMap[subpair] = image_node;
6195    }
6196    return result;
6197}
6198
6199static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6200    /* expects global_lock to be held by caller */
6201
6202    auto image_node = getImageNode(dev_data, image);
6203    if (image_node) {
6204        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6205         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6206         * the actual values.
6207         */
6208        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6209            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
6210        }
6211
6212        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6213            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
6214        }
6215    }
6216}
6217
6218// Return the correct layer/level counts if the caller used the special
6219// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
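// Example, for an assumed image with createInfo.mipLevels = 10: a range with baseMipLevel = 3 and
// levelCount = VK_REMAINING_MIP_LEVELS resolves to 10 - 3 = 7 levels; layer counts resolve the same way.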
6220static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6221                                         VkImage image) {
6222    /* expects global_lock to be held by caller */
6223
6224    *levels = range.levelCount;
6225    *layers = range.layerCount;
6226    auto image_node = getImageNode(dev_data, image);
6227    if (image_node) {
6228        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6229            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
6230        }
6231        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6232            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
6233        }
6234    }
6235}
6236
6237static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
6238    bool skip_call = false;
6239    IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image);
6240    if (image_node) {
6241        skip_call |= ValidateImageUsageFlags(
6242            dev_data, image_node, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6243                                      VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6244            false, "vkCreateImageView()",
6245            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6246        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6247        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_node, "vkCreateImageView()");
6248    }
6249    return skip_call;
6250}
6251
6252static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView view) {
6253    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, pCreateInfo));
6254    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, pCreateInfo->image);
6255}
6256
6257VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6258                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6259    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6260    std::unique_lock<std::mutex> lock(global_lock);
6261    bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6262    lock.unlock();
6263    if (skip_call)
6264        return VK_ERROR_VALIDATION_FAILED_EXT;
6265    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6266    if (VK_SUCCESS == result) {
6267        lock.lock();
6268        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6269        lock.unlock();
6270    }
6271
6272    return result;
6273}
6274
6275VKAPI_ATTR VkResult VKAPI_CALL
6276CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6277    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6278    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6279    if (VK_SUCCESS == result) {
6280        std::lock_guard<std::mutex> lock(global_lock);
6281        auto &fence_node = dev_data->fenceMap[*pFence];
6282        fence_node.fence = *pFence;
6283        fence_node.createInfo = *pCreateInfo;
6284        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6285    }
6286    return result;
6287}
6288
6289// TODO handle pipeline caches
6290VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6291                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6292    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6293    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6294    return result;
6295}
6296
6297VKAPI_ATTR void VKAPI_CALL
6298DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6299    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6300    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6301}
6302
6303VKAPI_ATTR VkResult VKAPI_CALL
6304GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6305    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6306    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6307    return result;
6308}
6309
6310VKAPI_ATTR VkResult VKAPI_CALL
6311MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6312    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6313    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6314    return result;
6315}
6316
6317// utility function to set collective state for pipeline
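// e.g., a dstColorBlendFactor of VK_BLEND_FACTOR_CONSTANT_COLOR falls inside the contiguous
// [CONSTANT_COLOR, ONE_MINUS_CONSTANT_ALPHA] enum range tested below, so the pipeline is flagged
// as depending on blend constants (blendConstantsEnabled).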
6318void set_pipeline_state(PIPELINE_NODE *pPipe) {
6319    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6320    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6321        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6322            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6323                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6324                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6325                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6326                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6327                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6328                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6329                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6330                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6331                    pPipe->blendConstantsEnabled = true;
6332                }
6333            }
6334        }
6335    }
6336}
6337
6338VKAPI_ATTR VkResult VKAPI_CALL
6339CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6340                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6341                        VkPipeline *pPipelines) {
6342    VkResult result = VK_SUCCESS;
6343    // TODO What to do with pipelineCache?
6344    // The order of operations here is a little convoluted but gets the job done
6345    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6346    //  2. Create state is then validated (which uses flags setup during shadowing)
6347    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6348    bool skip_call = false;
6349    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6350    vector<PIPELINE_NODE *> pPipeNode(count);
6351    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6352
6353    uint32_t i = 0;
6354    std::unique_lock<std::mutex> lock(global_lock);
6355
6356    for (i = 0; i < count; i++) {
6357        pPipeNode[i] = new PIPELINE_NODE;
6358        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
6359        pPipeNode[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->pCreateInfo);
6360        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6361
6362        skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6363    }
6364
6365    if (!skip_call) {
6366        lock.unlock();
6367        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6368                                                                          pPipelines);
6369        lock.lock();
6370        for (i = 0; i < count; i++) {
6371            pPipeNode[i]->pipeline = pPipelines[i];
6372            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6373        }
6374        lock.unlock();
6375    } else {
6376        for (i = 0; i < count; i++) {
6377            delete pPipeNode[i];
6378        }
6379        lock.unlock();
6380        return VK_ERROR_VALIDATION_FAILED_EXT;
6381    }
6382    return result;
6383}
6384
6385VKAPI_ATTR VkResult VKAPI_CALL
6386CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6387                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6388                       VkPipeline *pPipelines) {
6389    VkResult result = VK_SUCCESS;
6390    bool skip_call = false;
6391
6392    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6393    vector<PIPELINE_NODE *> pPipeNode(count);
6394    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6395
6396    uint32_t i = 0;
6397    std::unique_lock<std::mutex> lock(global_lock);
6398    for (i = 0; i < count; i++) {
6399        // TODO: Verify compute stage bits
6400
6401        // Create and initialize internal tracking data structure
6402        pPipeNode[i] = new PIPELINE_NODE;
6403        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
6404        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6405        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6406
6407        // TODO: Add Compute Pipeline Verification
6408        skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i], &dev_data->phys_dev_properties.features,
6409                                                dev_data->shaderModuleMap);
6410        // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6411    }
6412
6413    if (!skip_call) {
6414        lock.unlock();
6415        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6416                                                                         pPipelines);
6417        lock.lock();
6418        for (i = 0; i < count; i++) {
6419            pPipeNode[i]->pipeline = pPipelines[i];
6420            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6421        }
6422        lock.unlock();
6423    } else {
6424        for (i = 0; i < count; i++) {
6425            // Clean up any locally allocated data structures
6426            delete pPipeNode[i];
6427        }
6428        lock.unlock();
6429        return VK_ERROR_VALIDATION_FAILED_EXT;
6430    }
6431    return result;
6432}
6433
6434VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6435                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6436    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6437    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6438    if (VK_SUCCESS == result) {
6439        std::lock_guard<std::mutex> lock(global_lock);
6440        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6441    }
6442    return result;
6443}
6444
6445VKAPI_ATTR VkResult VKAPI_CALL
6446CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6447                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6448    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6449    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6450    if (VK_SUCCESS == result) {
6451        // TODOSC : Capture layout bindings set
6452        std::lock_guard<std::mutex> lock(global_lock);
6453        dev_data->descriptorSetLayoutMap[*pSetLayout] =
6454            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
6455    }
6456    return result;
6457}
6458
6459// Used by CreatePipelineLayout and CmdPushConstants.
6460// Note that the index argument is optional and only used by CreatePipelineLayout.
6461static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6462                                      const char *caller_name, uint32_t index = 0) {
6463    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6464    bool skip_call = false;
6465    // Check that offset + size don't exceed the max.
6466    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
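    // Worked example (illustrative values): with maxPushConstantsSize = 128, offset = 120 and size = 16 is
    // rejected because size (16) > max - offset (8); with offset = 0xFFFFFFF8 and size = 16, the naive test
    // (offset + size > max) would wrap to 8 and pass, but (offset >= max) catches it without any addition.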
6467    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6468        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6469        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6470            skip_call |=
6471                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6472                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
6473                                                              "exceeds this device's maxPushConstantsSize of %u.",
6474                        caller_name, index, offset, size, maxPushConstantsSize);
6475        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6476            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6477                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6478                                                                       "exceeds this device's maxPushConstantsSize of %u.",
6479                                 caller_name, offset, size, maxPushConstantsSize);
6480        } else {
6481            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6482                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6483        }
6484    }
6485    // size needs to be non-zero and a multiple of 4.
6486    if ((size == 0) || ((size & 0x3) != 0)) {
6487        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6488            skip_call |=
6489                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6490                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6491                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6492                        caller_name, index, size);
6493        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6494            skip_call |=
6495                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6496                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6497                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6498                        caller_name, size);
6499        } else {
6500            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6501                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6502        }
6503    }
6504    // offset needs to be a multiple of 4.
6505    if ((offset & 0x3) != 0) {
6506        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6507            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6508                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6509                                                                       "offset %u. Offset must be a multiple of 4.",
6510                                 caller_name, index, offset);
6511        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6512            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6513                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6514                                                                       "offset %u. Offset must be a multiple of 4.",
6515                                 caller_name, offset);
6516        } else {
6517            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6518                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6519        }
6520    }
6521    return skip_call;
6522}
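// Illustrative sketch (hypothetical application values, not layer code): ranges and the check each one trips,
// assuming a device limit of maxPushConstantsSize = 128:
//     VkPushConstantRange ok  = {VK_SHADER_STAGE_VERTEX_BIT, 0, 64};  // passes all three checks
//     VkPushConstantRange big = {VK_SHADER_STAGE_VERTEX_BIT, 96, 64}; // size 64 > 128 - 96: exceeds the device limit
//     VkPushConstantRange odd = {VK_SHADER_STAGE_VERTEX_BIT, 4, 6};   // size 6 is not a multiple of 4
//     VkPushConstantRange mis = {VK_SHADER_STAGE_VERTEX_BIT, 2, 8};   // offset 2 is not a multiple of 4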
6523
6524VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6525                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6526    bool skip_call = false;
6527    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6528    // Push Constant Range checks
6529    uint32_t i, j;
6530    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6531        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6532                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6533        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6534            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6535                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
6536                                 "vkCreatePipelineLayout() call has no stageFlags set for pPushConstantRanges[%u].", i);
6536        }
6537    }
6538    if (skip_call)
6539        return VK_ERROR_VALIDATION_FAILED_EXT;
6540
6541    // Each range has passed the checks above.  Now check for overlap between ranges.
6542    // There's no explicit Valid Usage language against overlap, so issue a warning instead of an error.
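    // Worked example (illustrative): ranges 0:[0, 16) and 1:[8, 24) overlap because minA (0) <= minB (8)
    // and maxA (16) > minB (8); adjacent ranges such as [0, 16) and [16, 32) fail both clauses and warn nothing.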
6543    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6544        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
6545            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
6546            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
6547            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
6548            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
6549            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
6550                skip_call |=
6551                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6552                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
6553                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
6554                            i, minA, maxA, j, minB, maxB);
6555            }
6556        }
6557    }
6558
6559    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6560    if (VK_SUCCESS == result) {
6561        std::lock_guard<std::mutex> lock(global_lock);
6562        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6563        plNode.layout = *pPipelineLayout;
6564        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
6565        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6566            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6567        }
6568        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
6569        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6570            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
6571        }
6572    }
6573    return result;
6574}
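// Illustrative sketch (hypothetical application code, not part of the layer): a pipeline layout whose two
// push constant ranges overlap. Creation still succeeds, but the overlap warning above fires.
//     VkPushConstantRange ranges[2] = {{VK_SHADER_STAGE_VERTEX_BIT, 0, 16},
//                                      {VK_SHADER_STAGE_FRAGMENT_BIT, 8, 16}};  // 1:[8, 24) overlaps 0:[0, 16)
//     VkPipelineLayoutCreateInfo ci = {VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
//     ci.pushConstantRangeCount = 2;
//     ci.pPushConstantRanges = ranges;
//     vkCreatePipelineLayout(device, &ci, nullptr, &layout);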
6575
6576VKAPI_ATTR VkResult VKAPI_CALL
6577CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6578                     VkDescriptorPool *pDescriptorPool) {
6579    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6580    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6581    if (VK_SUCCESS == result) {
6582        // Record the new pool in the global descriptor pool map
6583        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6584                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6585                    (uint64_t)*pDescriptorPool))
6586            return VK_ERROR_VALIDATION_FAILED_EXT;
6587        DESCRIPTOR_POOL_NODE *pNewNode = new (std::nothrow) DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo); // nothrow: failure returns NULL instead of throwing
6588        if (NULL == pNewNode) {
6589            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6590                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6591                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6592                return VK_ERROR_VALIDATION_FAILED_EXT;
6593        } else {
6594            std::lock_guard<std::mutex> lock(global_lock);
6595            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6596        }
6597    } else {
6598        // TODO: Determine whether any cleanup is needed if pool creation fails
6599    }
6600    return result;
6601}
6602
6603VKAPI_ATTR VkResult VKAPI_CALL
6604ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6605    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6606    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6607    if (VK_SUCCESS == result) {
6608        std::lock_guard<std::mutex> lock(global_lock);
6609        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6610    }
6611    return result;
6612}
6613// Ensure the pool contains enough descriptors and descriptor sets to satisfy
6614// an allocation request. Fills common_data with the total number of descriptors of each type required,
6615// as well as DescriptorSetLayout ptrs used for later update.
6616static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6617                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6618    // All state checks for AllocateDescriptorSets are done in a single function
6619    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6620}
6621// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
6622static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6623                                                 VkDescriptorSet *pDescriptorSets,
6624                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6625    // All the updates are contained in a single cvdescriptorset function
6626    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6627                                                   &dev_data->setMap, dev_data);
6628}
6629
6630VKAPI_ATTR VkResult VKAPI_CALL
6631AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6632    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6633    std::unique_lock<std::mutex> lock(global_lock);
6634    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6635    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6636    lock.unlock();
6637
6638    if (skip_call)
6639        return VK_ERROR_VALIDATION_FAILED_EXT;
6640
6641    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6642
6643    if (VK_SUCCESS == result) {
6644        lock.lock();
6645        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6646        lock.unlock();
6647    }
6648    return result;
6649}
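// The call above shows the locking pattern used throughout this file: hold the global lock only while
// validating and while recording state, and release it around the down-chain dispatch so the driver call
// never executes under the layer's lock. Schematically:
//     lock -> PreCallValidate*() -> unlock -> dispatch down the chain -> lock -> PostCallRecord*() -> unlock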
6650// Verify state before freeing DescriptorSets
6651static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6652                                              const VkDescriptorSet *descriptor_sets) {
6653    bool skip_call = false;
6654    // First make sure sets being destroyed are not currently in-use
6655    for (uint32_t i = 0; i < count; ++i)
6656        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6657
6658    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6659    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6660        // Can't Free from a NON_FREE pool
6661        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6662                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6663                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6664                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6665    }
6666    return skip_call;
6667}
6668// Sets have been removed from the pool so update underlying state
6669static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6670                                             const VkDescriptorSet *descriptor_sets) {
6671    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6672    if (!pool_state) return; // Defensive: an unknown pool handle was already flagged during validation
6673    pool_state->availableSets += count; // Return the freed sets to the pool's available count
6674
6675    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6676    for (uint32_t i = 0; i < count; ++i) {
6677        auto set_state = dev_data->setMap[descriptor_sets[i]];
6678        uint32_t type_index = 0, descriptor_count = 0;
6679        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6680            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6681            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6682            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6683        }
6684        freeDescriptorSet(dev_data, set_state);
6685        pool_state->sets.erase(set_state);
6686    }
6687}
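// Worked example (illustrative): freeing one set that holds 3 UNIFORM_BUFFER descriptors and 1
// COMBINED_IMAGE_SAMPLER descriptor increments availableSets by 1,
// availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] by 3, and
// availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] by 1,
// mirroring the debits made when the sets were allocated from the pool.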
6688
6689VKAPI_ATTR VkResult VKAPI_CALL
6690FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6691    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6692    // Make sure that no sets being destroyed are in-flight
6693    std::unique_lock<std::mutex> lock(global_lock);
6694    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6695    lock.unlock();
6696
6697    if (skip_call)
6698        return VK_ERROR_VALIDATION_FAILED_EXT;
6699    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6700    if (VK_SUCCESS == result) {
6701        lock.lock();
6702        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6703        lock.unlock();
6704    }
6705    return result;
6706}
6707// TODO : This is a Proof-of-concept for core validation architecture
6708//  Really we'll want to break out these functions to separate files but
6709//  keeping it all together here to prove out design
6710// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6711static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6712                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6713                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6714    // First perform the map look-ups.
6715    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
6716    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below.
6717
6718    // Now make call(s) that validate state, but don't perform state updates in this function
6719    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
6720    //  namespace which will parse params and make calls into specific class instances
6721    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6722                                                         descriptorCopyCount, pDescriptorCopies);
6723}
6724// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6725static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6726                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6727                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6728    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6729                                                 pDescriptorCopies);
6730}
6731
6732VKAPI_ATTR void VKAPI_CALL
6733UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6734                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6735    // Only map look-up at top level is for device-level layer_data
6736    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6737    std::unique_lock<std::mutex> lock(global_lock);
6738    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6739                                                         pDescriptorCopies);
6740    lock.unlock();
6741    if (!skip_call) {
6742        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6743                                                              pDescriptorCopies);
6744        lock.lock();
6745        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6746        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6747                                           pDescriptorCopies);
6748    }
6749}
6750
6751VKAPI_ATTR VkResult VKAPI_CALL
6752AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6753    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6754    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6755    if (VK_SUCCESS == result) {
6756        std::unique_lock<std::mutex> lock(global_lock);
6757        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6758
6759        if (pPool) {
6760            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6761                // Add command buffer to its commandPool map
6762                pPool->commandBuffers.push_back(pCommandBuffer[i]);
6763                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6764                // Add command buffer to map
6765                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6766                resetCB(dev_data, pCommandBuffer[i]);
6767                pCB->createInfo = *pCreateInfo;
6768                pCB->device = device;
6769            }
6770        }
6771        printCBList(dev_data);
6772        lock.unlock();
6773    }
6774    return result;
6775}
6776
6777// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
6778static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_NODE *fb_state) {
6779    fb_state->cb_bindings.insert(cb_state);
6780    for (auto attachment : fb_state->attachments) {
6781        auto view_state = attachment.view_state;
6782        if (view_state) {
6783            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
6784        }
6785    }
6786    // The render pass binding is independent of the attachments, so add it once rather than per-attachment
6787    auto rp_state = getRenderPass(dev_data, fb_state->createInfo.renderPass);
6788    if (rp_state) {
6789        addCommandBufferBinding(&rp_state->cb_bindings,
6790                                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
6791    }
6792}
6793
6794VKAPI_ATTR VkResult VKAPI_CALL
6795BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6796    bool skip_call = false;
6797    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6798    std::unique_lock<std::mutex> lock(global_lock);
6799    // Validate command buffer level
6800    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
6801    if (cb_node) {
6802        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6803        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6804            skip_call |=
6805                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6806                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6807                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6808                        "You must check CB fence before this call.",
6809                        commandBuffer);
6810        }
6811        clear_cmd_buf_and_mem_references(dev_data, cb_node);
6812        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6813            // Secondary Command Buffer
6814            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6815            if (!pInfo) {
6816                skip_call |=
6817                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6818                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6819                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6820                            reinterpret_cast<void *>(commandBuffer));
6821            } else {
6822                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6823                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6824                        skip_call |= log_msg(
6825                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6826                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6827                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6828                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must specify a valid renderpass parameter.",
6829                    }
6830                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6831                        skip_call |= log_msg(
6832                            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6833                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6834                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) may perform better if a "
6835                            "valid framebuffer parameter is specified.",
6836                            reinterpret_cast<void *>(commandBuffer));
6837                    } else {
6838                        string errorString = "";
6839                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6840                        if (framebuffer) {
6841                            if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
6842                                !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
6843                                                                 getRenderPass(dev_data, pInfo->renderPass)->pCreateInfo,
6844                                                                 errorString)) {
6845                                // renderPass that framebuffer was created with must be compatible with local renderPass
6846                                skip_call |= log_msg(
6847                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6848                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6849                                    __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
6850                                    "vkBeginCommandBuffer(): Secondary Command "
6851                                    "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6852                                    "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6853                                    reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
6854                                    reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
6855                                    reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
6856                            }
6857                            // Connect this framebuffer and its children to this cmdBuffer
6858                            AddFramebufferBinding(dev_data, cb_node, framebuffer);
6859                        }
6860                    }
6861                }
6862                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6863                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6864                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6865                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6866                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6867                                         __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6868                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6869                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
6870                                         "support precise occlusion queries.",
6871                                         reinterpret_cast<void *>(commandBuffer));
6872                }
6873            }
6874            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6875                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6876                if (renderPass) {
6877                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6878                        skip_call |= log_msg(
6879                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6880                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6881                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%u) "
6882                            "that is less than the number of subpasses (%u).",
6883                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6884                    }
6885                }
6886            }
6887        }
6888        if (CB_RECORDING == cb_node->state) {
6889            skip_call |=
6890                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6891                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6892                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6893                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6894                        (uint64_t)commandBuffer);
6895        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && !cb_node->cmds.empty() && CMD_END == cb_node->cmds.back().type)) {
6896            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
6897            auto pPool = getCommandPoolNode(dev_data, cmdPool);
6898            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6899                skip_call |=
6900                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6901                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6902                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6903                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6904                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6905                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6906            }
6907            resetCB(dev_data, commandBuffer);
6908        }
6909        // Set updated state here in case implicit reset occurs above
6910        cb_node->state = CB_RECORDING;
6911        cb_node->beginInfo = *pBeginInfo;
6912        if (cb_node->beginInfo.pInheritanceInfo) {
6913            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
6914            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
6915            // If this is a secondary command buffer that is inheriting state, update the items it inherits.
6916            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6917                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6918                cb_node->activeRenderPass = getRenderPass(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
6919                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
6920                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
6921            }
6922        }
6923    } else {
6924        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6925                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6926                             "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6927    }
6928    lock.unlock();
6929    if (skip_call) {
6930        return VK_ERROR_VALIDATION_FAILED_EXT;
6931    }
6932    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6933
6934    return result;
6935}
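// Illustrative sketch (hypothetical application code): the begin-info shape that satisfies the secondary
// command buffer checks above when VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT is set -- inheritance
// info present, a valid renderPass, and (to avoid the performance warning) a valid framebuffer.
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = renderPass;   // must be compatible with the framebuffer's render pass
//     inherit.subpass = 0;               // must be < subpassCount of renderPass
//     inherit.framebuffer = framebuffer; // optional here, but skipping it costs performance
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCB, &begin);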
6936
6937VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6938    bool skip_call = false;
6939    VkResult result = VK_SUCCESS;
6940    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6941    std::unique_lock<std::mutex> lock(global_lock);
6942    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6943    if (pCB) {
6944        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6945            // This needs spec clarification to update valid usage, see comments in PR:
6946            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6947            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6948        }
6949        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6950        for (auto query : pCB->activeQueries) {
6951            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6952                                 DRAWSTATE_INVALID_QUERY, "DS",
6953                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
6954                                 (uint64_t)(query.pool), query.index);
6955        }
6956    }
6957    if (!skip_call) {
6958        lock.unlock();
6959        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6960        lock.lock();
6961        if (VK_SUCCESS == result) {
6962            pCB->state = CB_RECORDED;
6963            // Reset CB status flags
6964            pCB->status = 0;
6965            printCB(dev_data, commandBuffer);
6966        }
6967    } else {
6968        result = VK_ERROR_VALIDATION_FAILED_EXT;
6969    }
6970    lock.unlock();
6971    return result;
6972}
6973
6974VKAPI_ATTR VkResult VKAPI_CALL
6975ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6976    bool skip_call = false;
6977    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6978    std::unique_lock<std::mutex> lock(global_lock);
6979    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6980    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6981    auto pPool = getCommandPoolNode(dev_data, cmdPool);
6982    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6983        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6984                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6985                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6986                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6987                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6988    }
6989    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
6990    lock.unlock();
6991    if (skip_call)
6992        return VK_ERROR_VALIDATION_FAILED_EXT;
6993    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6994    if (VK_SUCCESS == result) {
6995        lock.lock();
6996        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
6997        resetCB(dev_data, commandBuffer);
6998        lock.unlock();
6999    }
7000    return result;
7001}
7002
7003VKAPI_ATTR void VKAPI_CALL
7004CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7005    bool skip_call = false;
7006    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7007    std::unique_lock<std::mutex> lock(global_lock);
7008    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7009    if (pCB) {
7010        skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7011        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7012            skip_call |=
7013                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7014                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7015                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
7016                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
7017        }
7018
7019        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7020        if (pPN) {
7021            pCB->lastBound[pipelineBindPoint].pipeline_node = pPN;
7022            set_cb_pso_status(pCB, pPN);
7023            set_pipeline_state(pPN);
7024            // Only track the command buffer binding when the pipeline lookup succeeds
7025            addCommandBufferBinding(&pPN->cb_bindings, {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
7026        } else {
7027            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7028                                 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7029                                 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7030        }
7031    }
7032    lock.unlock();
7033    if (!skip_call)
7034        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7035}
7036
7037VKAPI_ATTR void VKAPI_CALL
7038CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7039    bool skip_call = false;
7040    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7041    std::unique_lock<std::mutex> lock(global_lock);
7042    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7043    if (pCB) {
7044        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7045        pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
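        // e.g. firstViewport = 1, viewportCount = 2: ((1u << 2) - 1u) << 1 == 0b110, marking viewports 1 and 2
        // as set; CmdSetScissor below tracks scissorMask with the same arithmetic.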
7046    }
7047    lock.unlock();
7048    if (!skip_call)
7049        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7050}
7051
7052VKAPI_ATTR void VKAPI_CALL
7053CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7054    bool skip_call = false;
7055    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7056    std::unique_lock<std::mutex> lock(global_lock);
7057    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7058    if (pCB) {
7059        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7060        pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
7061    }
7062    lock.unlock();
7063    if (!skip_call)
7064        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7065}
7066
7067VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7068    bool skip_call = false;
7069    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7070    std::unique_lock<std::mutex> lock(global_lock);
7071    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7072    if (pCB) {
7073        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7074        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7075
7076        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
7077        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
7078            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7079                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
7080                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
7081                                 "flag. This is undefined behavior and the set line width may be ignored.");
7082        } else {
7083            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
7084        }
7085    }
7086    lock.unlock();
7087    if (!skip_call)
7088        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7089}
7090
7091VKAPI_ATTR void VKAPI_CALL
7092CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7093    bool skip_call = false;
7094    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7095    std::unique_lock<std::mutex> lock(global_lock);
7096    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7097    if (pCB) {
7098        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7099        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7100    }
7101    lock.unlock();
7102    if (!skip_call)
7103        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7104                                                         depthBiasSlopeFactor);
7105}
7106
7107VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7108    bool skip_call = false;
7109    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7110    std::unique_lock<std::mutex> lock(global_lock);
7111    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7112    if (pCB) {
7113        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7114        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7115    }
7116    lock.unlock();
7117    if (!skip_call)
7118        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7119}
7120
7121VKAPI_ATTR void VKAPI_CALL
7122CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7123    bool skip_call = false;
7124    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7125    std::unique_lock<std::mutex> lock(global_lock);
7126    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7127    if (pCB) {
7128        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7129        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7130    }
7131    lock.unlock();
7132    if (!skip_call)
7133        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7134}
7135
7136VKAPI_ATTR void VKAPI_CALL
7137CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7138    bool skip_call = false;
7139    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7140    std::unique_lock<std::mutex> lock(global_lock);
7141    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7142    if (pCB) {
7143        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7144        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7145    }
7146    lock.unlock();
7147    if (!skip_call)
7148        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7149}
7150
7151VKAPI_ATTR void VKAPI_CALL
7152CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7153    bool skip_call = false;
7154    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7155    std::unique_lock<std::mutex> lock(global_lock);
7156    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7157    if (pCB) {
7158        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7159        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7160    }
7161    lock.unlock();
7162    if (!skip_call)
7163        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7164}
7165
7166VKAPI_ATTR void VKAPI_CALL
7167CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7168    bool skip_call = false;
7169    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7170    std::unique_lock<std::mutex> lock(global_lock);
7171    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7172    if (pCB) {
7173        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7174        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7175    }
7176    lock.unlock();
7177    if (!skip_call)
7178        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7179}
7180
7181VKAPI_ATTR void VKAPI_CALL
7182CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7183                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7184                      const uint32_t *pDynamicOffsets) {
7185    bool skip_call = false;
7186    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7187    std::unique_lock<std::mutex> lock(global_lock);
7188    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7189    if (pCB) {
7190        if (pCB->state == CB_RECORDING) {
7191            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7192            uint32_t totalDynamicDescriptors = 0;
7193            string errorString = "";
7194            uint32_t lastSetIndex = firstSet + setCount - 1;
7195            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
7196                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7197                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7198            }
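            // e.g. firstSet = 2, setCount = 3 gives lastSetIndex = 4, so both vectors grow to 5 entries
            // before sets 2..4 are written below.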
7199            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7200            auto pipeline_layout = getPipelineLayout(dev_data, layout);
7201            for (uint32_t i = 0; i < setCount; i++) {
7202                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7203                if (pSet) {
7204                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
7205                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
7206                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7207                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7208                                         DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
7209                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7210                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
7211                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7212                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7213                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7214                                             "DS 0x%" PRIxLEAST64
7215                                             " bound but it was never updated. You may want to either update it or not bind it.",
7216                                             (uint64_t)pDescriptorSets[i]);
7217                    }
7218                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7219                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
7220                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7221                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7222                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7223                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7224                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
7225                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7226                    }
7227
7228                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
7229
7230                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7231
7232                    if (setDynamicDescriptorCount) {
7233                        // First make sure we won't overstep bounds of pDynamicOffsets array
7234                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7235                            skip_call |=
7236                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7237                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7238                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7239                                        "descriptorSet #%u (0x%" PRIxLEAST64
7240                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7241                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7242                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
7243                                        (dynamicOffsetCount - totalDynamicDescriptors));
7244                        } else { // Validate and store dynamic offsets with the set
7245                            // Validate Dynamic Offset Minimums
7246                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7247                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
7248                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7249                                    if (vk_safe_modulo(
7250                                            pDynamicOffsets[cur_dyn_offset],
7251                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7252                                        skip_call |= log_msg(
7253                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7254                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7255                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7256                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7257                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
7258                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7259                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7260                                    }
7261                                    cur_dyn_offset++;
7262                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7263                                    if (vk_safe_modulo(
7264                                            pDynamicOffsets[cur_dyn_offset],
7265                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7266                                        skip_call |= log_msg(
7267                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7268                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7269                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7270                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7271                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
7272                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7273                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7274                                    }
7275                                    cur_dyn_offset++;
7276                                }
7277                            }
7278
7279                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7280                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7281                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7282                            // Keep running total of dynamic descriptor count to verify at the end
7283                            totalDynamicDescriptors += setDynamicDescriptorCount;
7284
7285                        }
7286                    }
7287                } else {
7288                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7289                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7290                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
7291                                         (uint64_t)pDescriptorSets[i]);
7292                }
7293                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7294                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7295                if (firstSet > 0) { // Check set #s below the first bound set
7296                    for (uint32_t i = 0; i < firstSet; ++i) {
7297                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7298                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7299                                                             pipeline_layout, i, errorString)) {
7300                            skip_call |= log_msg(
7301                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7302                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7303                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7304                                "DescriptorSet 0x%" PRIxLEAST64
7305                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7306                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7307                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7308                        }
7309                    }
7310                }
7311                // Check if newly last bound set invalidates any remaining bound sets
7312                if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() > (lastSetIndex + 1)) {
7313                    if (oldFinalBoundSet &&
7314                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7315                        auto old_set = oldFinalBoundSet->GetSet();
7316                        skip_call |=
7317                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7318                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7319                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7320                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7321                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7322                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7323                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7324                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7325                                    lastSetIndex + 1, (uint64_t)layout);
7326                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7327                    }
7328                }
7329            }
7330            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7331            if (totalDynamicDescriptors != dynamicOffsetCount) {
7332                skip_call |=
7333                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7334                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7335                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7336                            "is %u. It should exactly match the number of dynamic descriptors.",
7337                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
7338            }
7339        } else {
7340            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7341        }
7342    }
7343    lock.unlock();
7344    if (!skip_call)
7345        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7346                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7347}
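
// Illustrative sketch (hypothetical app-side values, not executed by the layer): a bind that
// satisfies both checks above -- each dynamic offset is a multiple of the relevant device
// limit, and dynamicOffsetCount equals the total number of dynamic descriptors being bound.
//
//     // Set 0 holds one UNIFORM_BUFFER_DYNAMIC, set 1 holds one STORAGE_BUFFER_DYNAMIC;
//     // assume both min*BufferOffsetAlignment limits are 256.
//     uint32_t dynamic_offsets[2] = {256, 512}; // multiples of 256
//     vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0, 2, sets, 2, dynamic_offsets); // 2 == total dynamic descriptors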
7348
7349VKAPI_ATTR void VKAPI_CALL
7350CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7351    bool skip_call = false;
7352    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7353    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7354    std::unique_lock<std::mutex> lock(global_lock);
7355
7356    auto buff_node = getBufferNode(dev_data, buffer);
7357    auto cb_node = getCBNode(dev_data, commandBuffer);
7358    if (cb_node && buff_node) {
7359        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7360        std::function<bool()> function = [=]() {
7361            return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7362        };
7363        cb_node->validate_functions.push_back(function);
7364        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7365        VkDeviceSize offset_align = 0;
7366        switch (indexType) {
7367        case VK_INDEX_TYPE_UINT16:
7368            offset_align = 2;
7369            break;
7370        case VK_INDEX_TYPE_UINT32:
7371            offset_align = 4;
7372            break;
7373        default:
7374            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7375            break;
7376        }
7377        if (!offset_align || (offset % offset_align)) {
7378            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7379                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7380                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7381                                 offset, string_VkIndexType(indexType));
7382        }
7383        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7384    } else {
7385        assert(0);
7386    }
7387    lock.unlock();
7388    if (!skip_call)
7389        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7390}
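
// Illustrative sketch (hypothetical app-side calls): the alignment rule enforced above.
// VK_INDEX_TYPE_UINT16 requires a 2-byte-aligned offset, VK_INDEX_TYPE_UINT32 a 4-byte one:
//
//     vkCmdBindIndexBuffer(cmd_buf, index_buf, 64, VK_INDEX_TYPE_UINT32); // OK: 64 % 4 == 0
//     vkCmdBindIndexBuffer(cmd_buf, index_buf, 6, VK_INDEX_TYPE_UINT32);  // error: 6 % 4 != 0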
7391
7392void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7393    uint32_t end = firstBinding + bindingCount;
7394    if (pCB->currentDrawData.buffers.size() < end) {
7395        pCB->currentDrawData.buffers.resize(end);
7396    }
7397    for (uint32_t i = 0; i < bindingCount; ++i) {
7398        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7399    }
7400}
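
// Worked example: firstBinding = 2 with bindingCount = 2 grows currentDrawData.buffers to
// four elements (if needed) and overwrites slots [2] and [3] only, leaving any buffers
// previously recorded in slots [0] and [1] intact -- mirroring vkCmdBindVertexBuffers.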
7401
7402static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7403
7404VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7405                                                uint32_t bindingCount, const VkBuffer *pBuffers,
7406                                                const VkDeviceSize *pOffsets) {
7407    bool skip_call = false;
7408    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7409    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7410    std::unique_lock<std::mutex> lock(global_lock);
7411
7412    auto cb_node = getCBNode(dev_data, commandBuffer);
7413    if (cb_node) {
7414        for (uint32_t i = 0; i < bindingCount; ++i) {
7415            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
7416            assert(buff_node);
7417            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7418            std::function<bool()> function = [=]() {
7419                return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7420            };
7421            cb_node->validate_functions.push_back(function);
7422        }
7423        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7424        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7425    } else {
7426        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7427    }
7428    lock.unlock();
7429    if (!skip_call)
7430        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7431}
7432
7433/* expects global_lock to be held by caller */
7434static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7435    bool skip_call = false;
7436
7437    for (auto imageView : pCB->updateImages) {
7438        auto view_state = getImageViewState(dev_data, imageView);
7439        if (!view_state)
7440            continue;
7441
7442        auto img_node = getImageNode(dev_data, view_state->create_info.image);
7443        assert(img_node);
7444        std::function<bool()> function = [=]() {
7445            SetImageMemoryValid(dev_data, img_node, true);
7446            return false;
7447        };
7448        pCB->validate_functions.push_back(function);
7449    }
7450    for (auto buffer : pCB->updateBuffers) {
7451        auto buff_node = getBufferNode(dev_data, buffer);
7452        assert(buff_node);
7453        std::function<bool()> function = [=]() {
7454            SetBufferMemoryValid(dev_data, buff_node, true);
7455            return false;
7456        };
7457        pCB->validate_functions.push_back(function);
7458    }
7459    return skip_call;
7460}
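
// Note on the lambdas queued above: validation is deferred by pushing these functions onto
// the command buffer's validate_functions list, which is drained at queue-submit time when
// the actual validity of backing memory is known; a recorded storage write simply marks the
// image or buffer memory valid for any subsequent read checks.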
7461
7462VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7463                                   uint32_t firstVertex, uint32_t firstInstance) {
7464    bool skip_call = false;
7465    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7466    std::unique_lock<std::mutex> lock(global_lock);
7467    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7468    if (pCB) {
7469        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7470        pCB->drawCount[DRAW]++;
7471        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
7472        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7473        // TODO : Need to pass commandBuffer as srcObj here
7474        skip_call |=
7475            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7476                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
7477        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7478        if (!skip_call) {
7479            updateResourceTrackingOnDraw(pCB);
7480        }
7481        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7482    }
7483    lock.unlock();
7484    if (!skip_call)
7485        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7486}
7487
7488VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7489                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7490                                          uint32_t firstInstance) {
7491    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7492    bool skip_call = false;
7493    std::unique_lock<std::mutex> lock(global_lock);
7494    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7495    if (pCB) {
7496        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7497        pCB->drawCount[DRAW_INDEXED]++;
7498        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
7499        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7500        // TODO : Need to pass commandBuffer as srcObj here
7501        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7502                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7503                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7504        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7505        if (!skip_call) {
7506            updateResourceTrackingOnDraw(pCB);
7507        }
7508        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7509    }
7510    lock.unlock();
7511    if (!skip_call)
7512        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7513                                                        firstInstance);
7514}
7515
7516VKAPI_ATTR void VKAPI_CALL
7517CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7518    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7519    bool skip_call = false;
7520    std::unique_lock<std::mutex> lock(global_lock);
7521
7522    auto cb_node = getCBNode(dev_data, commandBuffer);
7523    auto buff_node = getBufferNode(dev_data, buffer);
7524    if (cb_node && buff_node) {
7525        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
7526        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7527        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7528        cb_node->drawCount[DRAW_INDIRECT]++;
7529        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
7530        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7531        // TODO : Need to pass commandBuffer as srcObj here
7532        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7533                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7534                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7535        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7536        if (!skip_call) {
7537            updateResourceTrackingOnDraw(cb_node);
7538        }
7539        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
7540    } else {
7541        assert(0);
7542    }
7543    lock.unlock();
7544    if (!skip_call)
7545        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7546}
7547
7548VKAPI_ATTR void VKAPI_CALL
7549CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7550    bool skip_call = false;
7551    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7552    std::unique_lock<std::mutex> lock(global_lock);
7553
7554    auto cb_node = getCBNode(dev_data, commandBuffer);
7555    auto buff_node = getBufferNode(dev_data, buffer);
7556    if (cb_node && buff_node) {
7557        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
7558        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7559        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7560        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
7561        skip_call |=
7562            validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
7563        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7564        // TODO : Need to pass commandBuffer as srcObj here
7565        skip_call |=
7566            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7567                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
7568                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7569        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7570        if (!skip_call) {
7571            updateResourceTrackingOnDraw(cb_node);
7572        }
7573        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
7574    } else {
7575        assert(0);
7576    }
7577    lock.unlock();
7578    if (!skip_call)
7579        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7580}
7581
7582VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7583    bool skip_call = false;
7584    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7585    std::unique_lock<std::mutex> lock(global_lock);
7586    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7587    if (pCB) {
7588        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
7589        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7590        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7591        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7592    }
7593    lock.unlock();
7594    if (!skip_call)
7595        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7596}
7597
7598VKAPI_ATTR void VKAPI_CALL
7599CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7600    bool skip_call = false;
7601    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7602    std::unique_lock<std::mutex> lock(global_lock);
7603
7604    auto cb_node = getCBNode(dev_data, commandBuffer);
7605    auto buff_node = getBufferNode(dev_data, buffer);
7606    if (cb_node && buff_node) {
7607        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
7608        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7609        skip_call |=
7610            validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
7611        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7612        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7613        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
7614    }
7615    lock.unlock();
7616    if (!skip_call)
7617        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7618}
7619
7620VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7621                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
7622    bool skip_call = false;
7623    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7624    std::unique_lock<std::mutex> lock(global_lock);
7625
7626    auto cb_node = getCBNode(dev_data, commandBuffer);
7627    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
7628    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
7629    if (cb_node && src_buff_node && dst_buff_node) {
7630        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7631        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
7632        // Update bindings between buffers and cmd buffer
7633        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
7634        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
7635        // Validate that SRC & DST buffers have correct usage flags set
7636        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
7637                                              "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7638        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
7639                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7640
7641        std::function<bool()> function = [=]() {
7642            return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7643        };
7644        cb_node->validate_functions.push_back(function);
7645        function = [=]() {
7646            SetBufferMemoryValid(dev_data, dst_buff_node, true);
7647            return false;
7648        };
7649        cb_node->validate_functions.push_back(function);
7650
7651        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7652        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
7653    } else {
7654        // ParamChecker will flag errors on invalid objects; just assert here as a debugging aid
7655        assert(0);
7656    }
7657    lock.unlock();
7658    if (!skip_call)
7659        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7660}
7661
7662static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
7663                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
7664    bool skip_call = false;
7665
7666    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7667        uint32_t layer = i + subLayers.baseArrayLayer;
7668        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7669        IMAGE_CMD_BUF_LAYOUT_NODE node;
7670        if (!FindLayout(cb_node, srcImage, sub, node)) {
7671            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7672            continue;
7673        }
7674        if (node.layout != srcImageLayout) {
7675            // TODO: Improve log message in the next pass
7676            skip_call |=
7677                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7678                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image using source layout %s "
7679                                                                        "when the image's current layout is %s.",
7680                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7681        }
7682    }
7683    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7684        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7685            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
7686            auto image_node = getImageNode(dev_data, srcImage);
7687            if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
7688                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7689                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7690                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7691                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7692            }
7693        } else {
7694            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7695                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7696                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7697                                 string_VkImageLayout(srcImageLayout));
7698        }
7699    }
7700    return skip_call;
7701}
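
// Illustrative outcomes of the check above (hypothetical layouts):
//   srcImageLayout == TRANSFER_SRC_OPTIMAL and matches the tracked layout -> silent
//   srcImageLayout == GENERAL on an optimally tiled image                 -> perf warning only
//   srcImageLayout == COLOR_ATTACHMENT_OPTIMAL                            -> error; only
//                     TRANSFER_SRC_OPTIMAL or GENERAL are accepted as copy-source layouts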
7702
7703static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
7704                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
7705    bool skip_call = false;
7706
7707    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7708        uint32_t layer = i + subLayers.baseArrayLayer;
7709        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7710        IMAGE_CMD_BUF_LAYOUT_NODE node;
7711        if (!FindLayout(cb_node, destImage, sub, node)) {
7712            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7713            continue;
7714        }
7715        if (node.layout != destImageLayout) {
7716            skip_call |=
7717                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7718                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image using dest layout %s when "
7719                                                                        "the image's current layout is %s.",
7720                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7721        }
7722    }
7723    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7724        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7725            auto image_node = getImageNode(dev_data, destImage);
7726            if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
7727                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7728                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7729                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7730                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7731            }
7732        } else {
7733            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7734                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7735                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7736                                 string_VkImageLayout(destImageLayout));
7737        }
7738    }
7739    return skip_call;
7740}
7741
7742// Test if two VkExtent3D structs are equivalent
7743static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
7744    bool result = true;
7745    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
7746        (extent->depth != other_extent->depth)) {
7747        result = false;
7748    }
7749    return result;
7750}
7751
7752// Returns the image extent of a specific subresource.
7753static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_NODE *img, const VkImageSubresourceLayers *subresource) {
7754    const uint32_t mip = subresource->mipLevel;
7755    VkExtent3D extent = img->createInfo.extent;
7756    extent.width = std::max(1U, extent.width >> mip);
7757    extent.height = std::max(1U, extent.height >> mip);
7758    extent.depth = std::max(1U, extent.depth >> mip);
7759    return extent;
7760}
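
// Worked example: a 16x16x1 image queried at mipLevel = 2 yields (16 >> 2, 16 >> 2,
// max(1U, 1 >> 2)) = (4, 4, 1); the max() clamp keeps fully shifted-out dimensions at 1.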
7761
7762// Test if the extent argument has all dimensions set to 0.
7763static inline bool IsExtentZero(const VkExtent3D *extent) {
7764    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
7765}
7766
7767// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
7768static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_NODE *img) {
7769    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
7770    VkExtent3D granularity = { 0, 0, 0 };
7771    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
7772    if (pPool) {
7773        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
7774        if (vk_format_is_compressed(img->createInfo.format)) {
7775            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
7776            granularity.width *= block_size.width;
7777            granularity.height *= block_size.height;
7778        }
7779    }
7780    return granularity;
7781}
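
// Worked example: a queue family granularity of (4, 4, 1) applied to a BC1-compressed image
// (4x4 texel blocks) scales to (16, 16, 1), i.e. the granularity is expressed in texels
// rather than compressed blocks; depth is left unscaled since block depth is 1 for these
// formats.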
7782
7783// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
7784static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
7785    bool valid = true;
7786    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
7787        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
7788        valid = false;
7789    }
7790    return valid;
7791}
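
// Note: vk_safe_modulo() returns 0 when the divisor is 0, so a zero granularity component
// can never fail this alignment test; the all-zero granularity case is handled separately
// by the IsExtentZero() branches in the callers below.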
7792
7793// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
7794static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
7795                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
7796    bool skip = false;
7797    VkExtent3D offset_extent = {};
7798    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
7799    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
7800    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
7801    if (IsExtentZero(granularity)) {
7802        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
7803        if (IsExtentZero(&offset_extent) == false) {
7804            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7805                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7806                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
7807                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7808                            function, i, member, offset->x, offset->y, offset->z);
7809        }
7810    } else {
7811        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be
7812        // integer multiples of the image transfer granularity.
7813        if (IsExtentAligned(&offset_extent, granularity) == false) {
7814            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7815                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7816                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be integer "
7817                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
7818                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
7819                            granularity->depth);
7820        }
7821    }
7822    return skip;
7823}
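
// Illustrative outcomes (hypothetical granularity of (8, 8, 1)):
//   offset (16, 8, 0) -> OK, every component is a multiple of the granularity
//   offset (4, 8, 0)  -> error, 4 is not a multiple of 8
// With a (0, 0, 0) granularity, only offset (0, 0, 0) passes.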
7824
7825// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
7826static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
7827                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
7828                                  const uint32_t i, const char *function, const char *member) {
7829    bool skip = false;
7830    if (IsExtentZero(granularity)) {
7831        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
7832        // subresource extent.
7833        if (IsExtentEqual(extent, subresource_extent) == false) {
7834            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7835                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7836                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
7837                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7838                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
7839                            subresource_extent->height, subresource_extent->depth);
7840        }
7841    } else {
7842        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be
7843        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
7844        // subresource extent dimensions.
7845        VkExtent3D offset_extent_sum = {};
7846        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
7847        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
7848        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
7849        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
7850            skip |=
7851                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7852                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7853                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be integer multiples of this command buffer's "
7854                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
7855                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
7856                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
7857                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
7858                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
7859        }
7860    }
7861    return skip;
7862}
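
// Illustrative outcome (hypothetical): for a 60x60x1 subresource and granularity (8, 8, 1),
// offset (48, 48, 0) with extent (12, 12, 1) passes even though 12 is not a multiple of 8,
// because offset + extent == (60, 60, 1) reaches the subresource edge -- the "or" clause
// above exists precisely for subresources whose dimensions are not granularity multiples.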
7863
7864// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
7865static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
7866                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
7867    bool skip = false;
7868    if (vk_safe_modulo(value, granularity) != 0) {
7869        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7870                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7871                        "%s: pRegion[%d].%s (%d) must be an integer multiple of this command buffer's queue family image "
7872                        "transfer granularity width (%d).",
7873                        function, i, member, value, granularity);
7874    }
7875    return skip;
7876}
7877
7878// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
7879static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
7880                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
7881    bool skip = false;
7882    if (vk_safe_modulo(value, granularity) != 0) {
7883        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7884                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7885                        "%s: pRegion[%d].%s (%" PRIdLEAST64
7886                        ") must be an integer multiple of this command buffer's queue family image transfer "
7887                        "granularity width (%d).",
7888                        function, i, member, value, granularity);
7889    }
7890    return skip;
7891}
7892
7893// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
7894static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7895                                                                    const IMAGE_NODE *img, const VkImageCopy *region,
7896                                                                    const uint32_t i, const char *function) {
7897    bool skip = false;
7898    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
7899    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
7900    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
7901    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
7902    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
7903                           "extent");
7904    return skip;
7905}
7906
7907// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
7908static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7909                                                                          const IMAGE_NODE *img, const VkBufferImageCopy *region,
7910                                                                          const uint32_t i, const char *function) {
7911    bool skip = false;
7912    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
7913    skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
7914    skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
7915    skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
7916    skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
7917    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
7918    skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
7919                           function, "imageExtent");
7920    return skip;
7921}
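
// Minimal sketch (hypothetical helper and values, not called by the layer): one way to fill
// out a VkBufferImageCopy that passes every granularity check above for a scaled granularity
// of (16, 16, 1).
static inline VkBufferImageCopy ExampleConformingBufferImageCopy() {
    VkBufferImageCopy region = {};
    region.bufferOffset = 0;       // multiple of the granularity width (vk_safe_modulo(0, 16) == 0)
    region.bufferRowLength = 64;   // multiple of 16
    region.bufferImageHeight = 64; // multiple of 16
    region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    region.imageOffset = {16, 32, 0}; // components are multiples of (16, 16, 1)
    region.imageExtent = {64, 64, 1}; // multiples of the granularity in every dimension
    return region;
}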
7922
7923VKAPI_ATTR void VKAPI_CALL
7924CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7925             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7926    bool skip_call = false;
7927    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7928    std::unique_lock<std::mutex> lock(global_lock);
7929
7930    auto cb_node = getCBNode(dev_data, commandBuffer);
7931    auto src_img_node = getImageNode(dev_data, srcImage);
7932    auto dst_img_node = getImageNode(dev_data, dstImage);
7933    if (cb_node && src_img_node && dst_img_node) {
7934        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImage()");
7935        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyImage()");
7936        // Update bindings between images and cmd buffer
7937        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
7938        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
7939        // Validate that SRC & DST images have correct usage flags set
7940        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
7941                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7942        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
7943                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7944        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImage()"); };
7945        cb_node->validate_functions.push_back(function);
7946        function = [=]() {
7947            SetImageMemoryValid(dev_data, dst_img_node, true);
7948            return false;
7949        };
7950        cb_node->validate_functions.push_back(function);
7951
7952        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
7953        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
7954        for (uint32_t i = 0; i < regionCount; ++i) {
7955            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7956            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7957            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
7958                                                                          "vkCmdCopyImage()");
7959        }
7960    } else {
7961        assert(0);
7962    }
7963    lock.unlock();
7964    if (!skip_call)
7965        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7966                                                      regionCount, pRegions);
7967}
7968
7969// Validate that an image's sampleCount matches the requirement for a specific API call
7970static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_NODE *image_node, VkSampleCountFlagBits sample_count,
7971                                            const char *location) {
7972    bool skip = false;
7973    if (image_node->createInfo.samples != sample_count) {
7974        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
7975                       reinterpret_cast<uint64_t &>(image_node->image), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
7976                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
7977                       reinterpret_cast<uint64_t &>(image_node->image),
7978                       string_VkSampleCountFlagBits(image_node->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
7979    }
7980    return skip;
7981}
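
// Example trigger (hypothetical): passing a VK_SAMPLE_COUNT_4_BIT image as the source of
// vkCmdBlitImage(), which requires single-sample images, yields "... was created with a
// sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT."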
7982
7983VKAPI_ATTR void VKAPI_CALL
7984CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7985             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7986    bool skip_call = false;
7987    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7988    std::unique_lock<std::mutex> lock(global_lock);
7989
7990    auto cb_node = getCBNode(dev_data, commandBuffer);
7991    auto src_img_node = getImageNode(dev_data, srcImage);
7992    auto dst_img_node = getImageNode(dev_data, dstImage);
7993    if (cb_node && src_img_node && dst_img_node) {
7994        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
7995        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
7996        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdBlitImage()");
7997        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdBlitImage()");
7998        // Update bindings between images and cmd buffer
7999        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8000        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8001        // Validate that SRC & DST images have correct usage flags set
8002        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
8003                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8004        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
8005                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8006        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdBlitImage()"); };
8007        cb_node->validate_functions.push_back(function);
8008        function = [=]() {
8009            SetImageMemoryValid(dev_data, dst_img_node, true);
8010            return false;
8011        };
8012        cb_node->validate_functions.push_back(function);
8013
8014        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
8015        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
8016    } else {
8017        assert(0);
8018    }
8019    lock.unlock();
8020    if (!skip_call)
8021        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8022                                                      regionCount, pRegions, filter);
8023}
8024
8025VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8026                                                VkImage dstImage, VkImageLayout dstImageLayout,
8027                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8028    bool skip_call = false;
8029    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8030    std::unique_lock<std::mutex> lock(global_lock);
8031
8032    auto cb_node = getCBNode(dev_data, commandBuffer);
8033    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
8034    auto dst_img_node = getImageNode(dev_data, dstImage);
8035    if (cb_node && src_buff_node && dst_img_node) {
8036        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
8037        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
8038        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyBufferToImage()");
8039        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
8040        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8041        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8042                                              "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8043        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8044                                             "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8045        std::function<bool()> function = [=]() {
8046            SetImageMemoryValid(dev_data, dst_img_node, true);
8047            return false;
8048        };
8049        cb_node->validate_functions.push_back(function);
8050        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
8051        cb_node->validate_functions.push_back(function);
8052
8053        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8054        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
8055        for (uint32_t i = 0; i < regionCount; ++i) {
8056            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8057            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
8058                                                                                "vkCmdCopyBufferToImage()");
8059        }
8060    } else {
8061        assert(0);
8062    }
8063    lock.unlock();
8064    if (!skip_call)
8065        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
8066                                                              pRegions);
8067}
8068
8069VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8070                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8071                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8072    bool skip_call = false;
8073    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8074    std::unique_lock<std::mutex> lock(global_lock);
8075
8076    auto cb_node = getCBNode(dev_data, commandBuffer);
8077    auto src_img_node = getImageNode(dev_data, srcImage);
8078    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8079    if (cb_node && src_img_node && dst_buff_node) {
8080        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
8081        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8082        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
8083        // Update bindings between buffer/image and cmd buffer
8084        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8085        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8086        // Validate that SRC image & DST buffer have correct usage flags set
8087        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8088                                             "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8089        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8090                                              "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8091        std::function<bool()> function = [=]() {
8092            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8093        };
8094        cb_node->validate_functions.push_back(function);
8095        function = [=]() {
8096            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8097            return false;
8098        };
8099        cb_node->validate_functions.push_back(function);
8100
8101        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8102        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
8103        for (uint32_t i = 0; i < regionCount; ++i) {
8104            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8105            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_img_node, &pRegions[i], i,
8106                                                                                "vkCmdCopyImageToBuffer()");
8107        }
8108    } else {
8109        assert(0);
8110    }
8111    lock.unlock();
8112    if (!skip_call)
8113        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
8114                                                              pRegions);
8115}
8116
8117VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8118                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8119    bool skip_call = false;
8120    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8121    std::unique_lock<std::mutex> lock(global_lock);
8122
8123    auto cb_node = getCBNode(dev_data, commandBuffer);
8124    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8125    if (cb_node && dst_buff_node) {
8126        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
8127        // Update bindings between buffer and cmd buffer
8128        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8129        // Validate that DST buffer has correct usage flags set
8130        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8131                                              "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8132        std::function<bool()> function = [=]() {
8133            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8134            return false;
8135        };
8136        cb_node->validate_functions.push_back(function);
8137
8138        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8139        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
8140    } else {
8141        assert(0);
8142    }
8143    lock.unlock();
8144    if (!skip_call)
8145        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8146}
8147
8148VKAPI_ATTR void VKAPI_CALL
8149CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8150    bool skip_call = false;
8151    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8152    std::unique_lock<std::mutex> lock(global_lock);
8153
8154    auto cb_node = getCBNode(dev_data, commandBuffer);
8155    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8156    if (cb_node && dst_buff_node) {
8157        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
8158        // Update bindings between buffer and cmd buffer
8159        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8160        // Validate that DST buffer has correct usage flags set
8161        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
8162                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8163        std::function<bool()> function = [=]() {
8164            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8165            return false;
8166        };
8167        cb_node->validate_functions.push_back(function);
8168
8169        skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8170        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
8171    } else {
8172        assert(0);
8173    }
8174    lock.unlock();
8175    if (!skip_call)
8176        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8177}
8178
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times when an app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass).
            // Can we make this warning more specific? We'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments. Otherwise this seems more like a performance warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                 "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                 (uint64_t)(commandBuffer));
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                if (attachment->colorAttachment >= pSD->colorAttachmentCount) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d; ignored",
                        attachment->colorAttachment, pCB->activeSubpass);
                } else if (pSD->pColorAttachments[attachment->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored",
                        attachment->colorAttachment);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // No DS attachment is used in the active subpass
                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {

                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearColorImage()");
        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearDepthStencilImage()");
        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdResolveImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdResolveImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdResolveImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

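// Record the stage mask most recently used to set 'event' in both the command buffer's
// and the queue's event-to-stage maps. Invoked at queue-submit time via the deferred
// eventUpdates callbacks recorded below; always returns false (never forces a skip).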
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()");
        auto event_node = getEventNode(dev_data, event);
        if (event_node) {
            addCommandBufferBinding(&event_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_node->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()");
        auto event_node = getEventNode(dev_data, event);
        if (event_node) {
            addCommandBufferBinding(&event_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_node->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}

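// Apply the layout transitions described by an array of image memory barriers to the
// command buffer's per-subresource layout tracking. Flags a transition whose oldLayout
// does not match the layout previously recorded for that subresource; UNDEFINED is
// accepted as a wildcard source layout.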
static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "You cannot transition the layout from %s when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

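// Dispatch to ValidateMaskBits with the required/optional access bits appropriate for the
// given image layout, e.g. TRANSFER_DST_OPTIMAL requires VK_ACCESS_TRANSFER_WRITE_BIT.
// GENERAL (and any unrecognized layout) is accepted with any access mask.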
static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}

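// Cross-check a set of memory/buffer/image barriers:
//  - barriers inside a render pass require a subpass self-dependency
//  - queue family indices must be consistent with the resource's sharing mode
//  - src/dst access masks must be compatible with the old/new image layouts
//  - subresource aspect masks and ranges must fit the image, and buffer offset/size
//    must lie within the buffer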
static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = getImageNode(dev_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                             "PREINITIALIZED.",
                            funcName);
            }
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data) {
                format = image_data->createInfo.format;
                arrayLayers = image_data->createInfo.arrayLayers;
                mipLevels = image_data->createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                // Swapchain images are not tracked as ordinary images; look the image up
                // through its swapchain instead
                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
                if (imageswap_data) {
                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
                    if (swapchain_data) {
                        format = swapchain_data->createInfo.imageFormat;
                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
                if (vk_format_is_depth_or_stencil(format)) {
                    if (vk_format_is_depth_and_stencil(format)) {
                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                        "%s: Image is a depth and stencil format and thus must "
                                        "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
                                        "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                        funcName);
                        }
                    } else if (vk_format_is_depth_only(format)) {
                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                        "%s: Image is a depth-only format and thus must "
                                        "have VK_IMAGE_ASPECT_DEPTH_BIT set.",
                                        funcName);
                        }
                    } else { // stencil-only case
                        if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                        "%s: Image is a stencil-only format and thus must "
                                        "have VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                        funcName);
                        }
                    }
                } else { // image is a color format
                    if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                    "%s: Image is a color format and thus must "
                                    "have VK_IMAGE_ASPECT_COLOR_BIT set.",
                                    funcName);
                    }
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
                                                                 "baseArrayLayer (%d) and layerCount (%d) be less "
                                                                 "than or equal to the total number of layers (%d).",
                                funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
                                arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
                                                                 "(%d) and levelCount (%d) be less than or equal to "
                                                                 "the total number of levels (%d).",
                                funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
                                mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
        if (buffer_node) {
            auto buffer_size = buffer_node->memSize;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip_call;
}

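// Queue-submit-time check for vkCmdWaitEvents: accumulate the stage masks with which the
// waited events were last set (per-queue state first, falling back to the global event
// state) and verify that sourceStageMask equals that OR, optionally plus
// VK_PIPELINE_STAGE_HOST_BIT for events set from the host via vkSetEvent.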
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = getEventNode(dev_data, event);
            if (!global_event_data) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
                                                            "using srcStageMask 0x%X which must be the bitwise "
                                                            "OR of the stageMask parameters used in calls to "
                                                            "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
                                                            "used with vkSetEvent but instead is 0x%X.",
                             sourceStageMask, stageMask);
    }
    return skip_call;
}

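// Track the waited-on events on the command buffer and defer srcStageMask validation
// (validateEventStageMask above) until queue submit, when the actual vkCmdSetEvent stage
// masks are known. Image layout transitions and barrier validity are checked here.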
VKAPI_ATTR void VKAPI_CALL
CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        auto firstEventIndex = pCB->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            auto event_node = getEventNode(dev_data, pEvents[i]);
            if (event_node) {
                addCommandBufferBinding(&event_node->cb_bindings,
                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
                                        pCB);
                event_node->cb_bindings.insert(pCB);
            }
            pCB->waitedEvents.insert(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip_call |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL
CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip_call |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

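// Deferred (queue-submit-time) update marking a query as available/unavailable in both
// the command buffer's and the queue's query-state maps; always returns false.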
bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
        auto query_pool_node = getQueryPoolNode(dev_data, queryPool);
        if (query_pool_node) {
            addCommandBufferBinding(&query_pool_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
        auto query_pool_node = getQueryPoolNode(dev_data, queryPool);
        if (query_pool_node) {
            addCommandBufferBinding(&query_pool_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
            pCB->queryUpdates.push_back(queryUpdate);
        }
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
        auto query_pool_node = getQueryPoolNode(dev_data, queryPool);
        if (query_pool_node) {
            addCommandBufferBinding(&query_pool_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

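// Queue-submit-time check for vkCmdCopyQueryPoolResults: each copied query must have been
// marked available (per-queue state first, falling back to device-global state), otherwise
// report an invalid-query error.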
bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end())
        return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_data = queue_data->second.queryToStateMap.find(query);
        bool fail = false;
        if (query_data != queue_data->second.queryToStateMap.end()) {
            if (!query_data->second) {
                fail = true;
            }
        } else {
            auto global_query_data = dev_data->queryToStateMap.find(query);
            if (global_query_data != dev_data->queryToStateMap.end()) {
                if (!global_query_data->second) {
                    fail = true;
                }
            } else {
                fail = true;
            }
        }
        if (fail) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
        cb_node->queryUpdates.push_back(queryUpdate);
        if (cb_node->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
        auto query_pool_node = getQueryPoolNode(dev_data, queryPool);
        if (query_pool_node) {
            addCommandBufferBinding(&query_pool_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
    // contained in the pipeline ranges.
    // Build a {start, end} span list for ranges with matching stage flags.
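    // For example, matching ranges [0, 16) and [8, 32) coalesce into the single span
    // [0, 32), so an update at offset 4 with size 24 is accepted even though it is not
    // contained in either range alone.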
9060    const auto &ranges = pipeline_layout->push_constant_ranges;
9061    struct span {
9062        uint32_t start;
9063        uint32_t end;
9064    };
9065    std::vector<span> spans;
9066    spans.reserve(ranges.size());
9067    for (const auto &iter : ranges) {
9068        if (iter.stageFlags == stageFlags) {
9069            spans.push_back({iter.offset, iter.offset + iter.size});
9070        }
9071    }
9072    if (spans.size() == 0) {
9073        // There were no ranges that matched the stageFlags.
9074        skip_call |=
9075            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9076                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9077                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
9078                    (uint32_t)stageFlags, (uint64_t)layout);
9079    } else {
9080        // Sort span list by start value.
9081        struct comparer {
9082            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9083        } my_comparer;
9084        std::sort(spans.begin(), spans.end(), my_comparer);
9085
9086        // Examine two spans at a time.
9087        std::vector<span>::iterator current = spans.begin();
9088        std::vector<span>::iterator next = current + 1;
9089        while (next != spans.end()) {
9090            if (current->end < next->start) {
9091                // There is a gap; cannot coalesce. Move to the next two spans.
9092                ++current;
9093                ++next;
9094            } else {
9095                // Coalesce the two spans.  The start of the next span
9096                // is within the current span, so pick the larger of
9097                // the end values to extend the current span.
9098                // Then delete the next span and set next to the span after it.
9099                current->end = max(current->end, next->end);
9100                next = spans.erase(next);
9101            }
9102        }
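        // Worked example of the coalescing above (illustrative numbers): ranges
        // {offset=0,size=16}, {offset=8,size=24}, {offset=40,size=8} with matching
        // stageFlags become spans {0,16},{8,32},{40,48}; the first two merge into
        // {0,32}, leaving {0,32} and {40,48} for the containment check below.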
9103
9104        // Now we can check if the incoming range is within any of the spans.
9105        bool contained_in_a_range = false;
9106        for (uint32_t i = 0; i < spans.size(); ++i) {
9107            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9108                contained_in_a_range = true;
9109                break;
9110            }
9111        }
9112        if (!contained_in_a_range) {
9113            skip_call |=
9114                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9115                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%u, %u) "
9116                                                              "with stageFlags = 0x%" PRIx32 " "
9117                                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
9118                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
9119        }
9120    }
9121    lock.unlock();
9122    if (!skip_call)
9123        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9124}
9125
9126VKAPI_ATTR void VKAPI_CALL
9127CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9128    bool skip_call = false;
9129    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9130    std::unique_lock<std::mutex> lock(global_lock);
9131    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9132    if (pCB) {
9133        QueryObject query = {queryPool, slot};
9134        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9135        pCB->queryUpdates.push_back(queryUpdate);
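        // The std::bind above captures commandBuffer and query now but defers the
        // state update until queue submission; it is equivalent to the lambda
        //   [commandBuffer, query](VkQueue q) { return setQueryState(q, commandBuffer, query, true); }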
9136        if (pCB->state == CB_RECORDING) {
9137            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9138        } else {
9139            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9140        }
9141    }
9142    lock.unlock();
9143    if (!skip_call)
9144        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9145}
9146
9147static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9148                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
9149    bool skip_call = false;
9150
9151    for (uint32_t attach = 0; attach < count; attach++) {
9152        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9153            // Attachment counts are verified elsewhere, but prevent an invalid access
9154            if (attachments[attach].attachment < fbci->attachmentCount) {
9155                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9156                auto view_state = getImageViewState(dev_data, *image_view);
9157                if (view_state) {
9158                    auto image_node = getImageNode(dev_data, view_state->create_info.image);
9159                    if (image_node != nullptr) {
9160                        if ((image_node->createInfo.usage & usage_flag) == 0) {
9161                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9162                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
9163                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9164                                                 "IMAGE_USAGE flags (%s).",
9165                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
9166                        }
9167                    }
9168                }
9169            }
9170        }
9171    }
9172    return skip_call;
9173}
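
// Illustrative application-side sketch (assumption: example code, not part of
// this layer): an image whose view backs a color attachment must be created
// with the usage bit MatchUsage() checks for, e.g.
//   VkImageCreateInfo ici = {};
//   ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
//   ici.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
// Omitting VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT here and then using the image's
// view in a framebuffer triggers the DRAWSTATE_INVALID_IMAGE_USAGE error above.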
9174
9175// Validate VkFramebufferCreateInfo which includes:
9176// 1. attachmentCount equals renderPass attachmentCount
9177// 2. corresponding framebuffer and renderpass attachments have matching formats
9178// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9179// 4. fb attachments only have a single mip level
9180// 5. fb attachment dimensions are each at least as large as the fb
9181// 6. fb attachments use identity swizzle
9182// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9183// 8. fb dimensions are within physical device limits
9184static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9185    bool skip_call = false;
9186
9187    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
9188    if (rp_node) {
9189        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
9190        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9191            skip_call |= log_msg(
9192                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9193                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9194                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9195                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
9196                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9197        } else {
9198            // attachmentCounts match, so make sure corresponding attachment details line up
9199            const VkImageView *image_views = pCreateInfo->pAttachments;
9200            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9201                auto view_state = getImageViewState(dev_data, image_views[i]);
9202                auto ivci = view_state->create_info;
9203                if (ivci.format != rpci->pAttachments[i].format) {
9204                    skip_call |= log_msg(
9205                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9206                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9207                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9208                              "the format of "
9209                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9210                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9211                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9212                }
9213                const VkImageCreateInfo *ici = &getImageNode(dev_data, ivci.image)->createInfo;
9214                if (ici->samples != rpci->pAttachments[i].samples) {
9215                    skip_call |= log_msg(
9216                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9217                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9218                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9219                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9220                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9221                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9222                }
9223                // Verify that view only has a single mip level
9224                if (ivci.subresourceRange.levelCount != 1) {
9225                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9226                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9227                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
9228                                         "but only a single mip level (levelCount ==  1) is allowed when creating a Framebuffer.",
9229                                         i, ivci.subresourceRange.levelCount);
9230                }
9231                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9232                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9233                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9234                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9235                    (mip_height < pCreateInfo->height)) {
9236                    skip_call |=
9237                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9238                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9239                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9240                                "than the corresponding "
9241                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9242                                "dimensions for "
9243                                "attachment #%u, framebuffer:\n"
9244                                "width: %u, %u\n"
9245                                "height: %u, %u\n"
9246                                "layerCount: %u, %u\n",
9247                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9248                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9249                }
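                // Worked example of the check above (illustrative numbers): for a
                // 1024x1024 image viewed with baseMipLevel = 2, the usable extent is
                // max(1u, 1024 >> 2) = 256 per dimension, so the framebuffer being
                // created can be at most 256x256 for this attachment.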
9250                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9251                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9252                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9253                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9254                    skip_call |= log_msg(
9255                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9256                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9257                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All framebuffer "
9258                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9259                        "r swizzle = %s\n"
9260                        "g swizzle = %s\n"
9261                        "b swizzle = %s\n"
9262                        "a swizzle = %s\n",
9263                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9264                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
9265                }
9266            }
9267        }
9268        // Verify correct attachment usage flags
9269        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9270            // Verify input attachments:
9271            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
9272                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
9273            // Verify color attachments:
9274            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
9275                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
9276            // Verify depth/stencil attachments:
9277            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9278                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9279                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
9280            }
9281        }
9282    } else {
9283        skip_call |=
9284            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9285                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9286                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
9287                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9288    }
9289    // Verify FB dimensions are within physical device limits
9290    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
9291        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
9292        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
9293        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9294                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9295                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
9296                             "Here are the respective dimensions: requested, device max:\n"
9297                             "width: %u, %u\n"
9298                             "height: %u, %u\n"
9299                             "layerCount: %u, %u\n",
9300                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9301                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9302                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
9303    }
9304    return skip_call;
9305}
9306
9307// Validate VkFramebufferCreateInfo state prior to calling down the chain to create the Framebuffer object
9308//  Returns true if an error was encountered and the callback requests that the call down the chain be skipped;
9309//   false indicates that the call down the chain should proceed
9310static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9311    // TODO : Verify that the renderPass the FB is being created with is compatible with the FB
9312    bool skip_call = false;
9313    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9314    return skip_call;
9315}
9316
9317// CreateFramebuffer state has been validated and the call down the chain has completed, so record the new framebuffer object
9318static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9319    // Shadow create info and store in map
9320    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
9321        new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo));
9322
9323    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9324        VkImageView view = pCreateInfo->pAttachments[i];
9325        auto view_state = getImageViewState(dev_data, view);
9326        if (!view_state) {
9327            continue;
9328        }
9329        MT_FB_ATTACHMENT_INFO fb_info;
9330        fb_info.mem = getImageNode(dev_data, view_state->create_info.image)->mem;
9331        fb_info.view_state = view_state;
9332        fb_info.image = view_state->create_info.image;
9333        fb_node->attachments.push_back(fb_info);
9334    }
9335    dev_data->frameBufferMap[fb] = std::move(fb_node);
9336}
9337
9338VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9339                                                 const VkAllocationCallbacks *pAllocator,
9340                                                 VkFramebuffer *pFramebuffer) {
9341    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9342    std::unique_lock<std::mutex> lock(global_lock);
9343    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9344    lock.unlock();
9345
9346    if (skip_call)
9347        return VK_ERROR_VALIDATION_FAILED_EXT;
9348
9349    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9350
9351    if (VK_SUCCESS == result) {
9352        lock.lock();
9353        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9354        lock.unlock();
9355    }
9356    return result;
9357}
9358
9359static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9360                           std::unordered_set<uint32_t> &processed_nodes) {
9361    // If we have already checked this node, no dependency path was found through it, so return false.
9362    if (processed_nodes.count(index))
9363        return false;
9364    processed_nodes.insert(index);
9365    const DAGNode &node = subpass_to_node[index];
9366    // Look for a direct dependency. If one exists return true; otherwise recurse on the previous nodes.
9367    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9368        for (auto elem : node.prev) {
9369            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9370                return true;
9371        }
9372    } else {
9373        return true;
9374    }
9375    return false;
9376}
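
// Illustrative walk of the search above (hypothetical edges): with
// subpass_to_node[3].prev == {1, 2} and subpass_to_node[1].prev == {0},
// FindDependency(3, 0, ...) does not find 0 directly in node 3's prev list,
// recurses into nodes 1 and 2, finds 0 in node 1's prev list, and returns true;
// processed_nodes keeps diamond-shaped graphs from being walked twice.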
9377
9378static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9379                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9380    bool result = true;
9381    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9382    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9383        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9384            continue;
9385        const DAGNode &node = subpass_to_node[subpass];
9386        // Check for a specified dependency between the two nodes. If one exists we are done.
9387        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9388        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9389        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9390            // If no explicit dependency exists, an implicit one still might. If neither does, log an error.
9391            std::unordered_set<uint32_t> processed_nodes;
9392            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9393                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9394                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9395                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9396                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9397                                     dependent_subpasses[k]);
9398                result = false;
9399            }
9400        }
9401    }
9402    return result;
9403}
9404
9405static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9406                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9407    const DAGNode &node = subpass_to_node[index];
9408    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9409    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9410    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9411        if (attachment == subpass.pColorAttachments[j].attachment)
9412            return true;
9413    }
9414    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9415        if (attachment == subpass.pDepthStencilAttachment->attachment)
9416            return true;
9417    }
9418    bool result = false;
9419    // Loop through previous nodes and see if any of them write to the attachment.
9420    for (auto elem : node.prev) {
9421        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9422    }
9423    // If the attachment was written to by a previous node, then this node needs to preserve it.
9424    if (result && depth > 0) {
9425        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9426        bool has_preserved = false;
9427        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9428            if (subpass.pPreserveAttachments[j] == attachment) {
9429                has_preserved = true;
9430                break;
9431            }
9432        }
9433        if (!has_preserved) {
9434            skip_call |=
9435                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9436                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9437                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9438        }
9439    }
9440    return result;
9441}
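
// Illustrative example (hypothetical subpasses): given dependency edges
// 0 -> 1 -> 2, if subpass 0 writes color attachment 2 and subpass 2 reads it
// as an input attachment, then subpass 1, which uses the attachment in neither
// role, must still list 2 in its pPreserveAttachments or the recursion above
// reports DRAWSTATE_INVALID_RENDERPASS.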
9442
9443// Returns true if the half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2) intersect.
9444template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9445    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
9446}
9447
9448bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9449    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9450            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9451}
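
// Example values (illustrative): reading each pair as a half-open interval,
// isRangeOverlapping(0u, 8u, 4u, 12u) is true  ([0,8) intersects [4,16)), and
// isRangeOverlapping(0u, 8u, 8u, 8u)  is false ([0,8) merely touches [8,16)).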
9452
9453static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
9454                                 RENDER_PASS_NODE const * renderPass) {
9455    bool skip_call = false;
9456    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
9457    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
9458    auto const & subpass_to_node = renderPass->subpassToNode;
9459    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9460    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9461    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9462    // Find overlapping attachments
9463    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9464        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9465            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9466            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9467            if (viewi == viewj) {
9468                overlapping_attachments[i].push_back(j);
9469                overlapping_attachments[j].push_back(i);
9470                continue;
9471            }
9472            auto view_state_i = getImageViewState(my_data, viewi);
9473            auto view_state_j = getImageViewState(my_data, viewj);
9474            if (!view_state_i || !view_state_j) {
9475                continue;
9476            }
9477            auto view_ci_i = view_state_i->create_info;
9478            auto view_ci_j = view_state_j->create_info;
9479            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9480                overlapping_attachments[i].push_back(j);
9481                overlapping_attachments[j].push_back(i);
9482                continue;
9483            }
9484            auto image_data_i = getImageNode(my_data, view_ci_i.image);
9485            auto image_data_j = getImageNode(my_data, view_ci_j.image);
9486            if (!image_data_i || !image_data_j) {
9487                continue;
9488            }
9489            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
9490                                                                             image_data_j->memOffset, image_data_j->memSize)) {
9491                overlapping_attachments[i].push_back(j);
9492                overlapping_attachments[j].push_back(i);
9493            }
9494        }
9495    }
9496    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9497        uint32_t attachment = i;
9498        for (auto other_attachment : overlapping_attachments[i]) {
9499            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9500                skip_call |=
9501                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9502                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9503                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9504                            attachment, other_attachment);
9505            }
9506            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9507                skip_call |=
9508                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9509                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9510                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9511                            other_attachment, attachment);
9512            }
9513        }
9514    }
9515    // For each attachment, find the subpasses that use it.
9516    unordered_set<uint32_t> attachmentIndices;
9517    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9518        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9519        attachmentIndices.clear();
9520        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9521            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9522            if (attachment == VK_ATTACHMENT_UNUSED)
9523                continue;
9524            input_attachment_to_subpass[attachment].push_back(i);
9525            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9526                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9527            }
9528        }
9529        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9530            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9531            if (attachment == VK_ATTACHMENT_UNUSED)
9532                continue;
9533            output_attachment_to_subpass[attachment].push_back(i);
9534            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9535                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9536            }
9537            attachmentIndices.insert(attachment);
9538        }
9539        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9540            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9541            output_attachment_to_subpass[attachment].push_back(i);
9542            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9543                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9544            }
9545
9546            if (attachmentIndices.count(attachment)) {
9547                skip_call |=
9548                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
9549                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9550                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
9551                            attachment, i);
9552            }
9553        }
9554    }
9555    // If there is a dependency needed make sure one exists
9556    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9557        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9558        // If the attachment is an input then all subpasses that output must have a dependency relationship
9559        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9560            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9561            if (attachment == VK_ATTACHMENT_UNUSED)
9562                continue;
9563            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9564        }
9565        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9566        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9567            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9568            if (attachment == VK_ATTACHMENT_UNUSED)
9569                continue;
9570            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9571            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9572        }
9573        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9574            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9575            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9576            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9577        }
9578    }
9579    // Walk the implicit dependencies: if a subpass reads an attachment, make sure the attachment is preserved
9580    // by every subpass between the one that wrote it and the one reading it.
9581    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9582        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9583        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9584            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9585        }
9586    }
9587    return skip_call;
9588}
9589// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
9590// VkAttachmentDescription structs used by the subpasses of a renderpass. The initial check ensures that
9591// attachments first used in a READ_ONLY layout do not have CLEAR as their loadOp.
9592static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9593                                                  const uint32_t attachment,
9594                                                  const VkAttachmentDescription &attachment_description) {
9595    bool skip_call = false;
9596    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9597    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9598        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9599            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9600            skip_call |=
9601                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9602                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9603                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
9604        }
9605    }
9606    return skip_call;
9607}
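
// Illustrative trigger for the check above: an attachment description with
// loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR whose first subpass reference uses
// layout VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL is flagged, since a
// read-only layout cannot be the target of a clear.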
9608
9609static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9610    bool skip = false;
9611
9612    // Track when we're observing the first use of an attachment
9613    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9614    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9615        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9616        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9617            auto attach_index = subpass.pColorAttachments[j].attachment;
9618            if (attach_index == VK_ATTACHMENT_UNUSED)
9619                continue;
9620
9621            switch (subpass.pColorAttachments[j].layout) {
9622            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9623                /* This is ideal. */
9624                break;
9625
9626            case VK_IMAGE_LAYOUT_GENERAL:
9627                /* May not be optimal; TODO: reconsider this warning based on
9628                 * other constraints?
9629                 */
9630                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9631                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9632                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9633                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9634                break;
9635
9636            default:
9637                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9638                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9639                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9640                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9641                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
9642            }
9643
9644            if (attach_first_use[attach_index]) {
9645                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout,
9646                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9647            }
9648            attach_first_use[attach_index] = false;
9649        }
9650        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9651            switch (subpass.pDepthStencilAttachment->layout) {
9652            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9653            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9654                /* These are ideal. */
9655                break;
9656
9657            case VK_IMAGE_LAYOUT_GENERAL:
9658                /* May not be optimal; TODO: reconsider this warning based on
9659                 * other constraints? GENERAL can be better than doing a bunch
9660                 * of transitions.
9661                 */
9662                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9663                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9664                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9665                                "GENERAL layout for depth attachment may not give optimal performance.");
9666                break;
9667
9668            default:
9669                /* No other layouts are acceptable */
9670                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9671                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9672                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9673                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9674                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9675                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9676            }
9677
9678            auto attach_index = subpass.pDepthStencilAttachment->attachment;
9679            if (attach_first_use[attach_index]) {
9680                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
9681                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9682            }
9683            attach_first_use[attach_index] = false;
9684        }
9685        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9686            auto attach_index = subpass.pInputAttachments[j].attachment;
9687            if (attach_index == VK_ATTACHMENT_UNUSED)
9688                continue;
9689
9690            switch (subpass.pInputAttachments[j].layout) {
9691            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9692            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9693                /* These are ideal. */
9694                break;
9695
9696            case VK_IMAGE_LAYOUT_GENERAL:
9697                /* May not be optimal. TODO: reconsider this warning based on
9698                 * other constraints.
9699                 */
9700                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9701                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9702                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9703                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9704                break;
9705
9706            default:
9707                /* No other layouts are acceptable */
9708                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9709                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9710                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9711                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
9712            }
9713
9714            if (attach_first_use[attach_index]) {
9715                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout,
9716                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9717            }
9718            attach_first_use[attach_index] = false;
9719        }
9720    }
9721    return skip;
9722}
9723
9724static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9725                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9726    bool skip_call = false;
9727    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9728        DAGNode &subpass_node = subpass_to_node[i];
9729        subpass_node.pass = i;
9730    }
9731    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9732        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9733        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9734            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9735            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9736                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9737                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
9738        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9739            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9740                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9741        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9742            has_self_dependency[dependency.srcSubpass] = true;
9743        }
9744        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9745            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9746        }
9747        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9748            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9749        }
9750    }
9751    return skip_call;
9752}
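
// Illustrative example of the DAG built above (hypothetical dependencies):
// { {src=0, dst=1}, {src=1, dst=2}, {src=2, dst=2} } yields
// subpass_to_node[1].prev == {0}, subpass_to_node[1].next == {2}, and
// has_self_dependency[2] == true; an edge involving VK_SUBPASS_EXTERNAL is
// recorded only on its non-external endpoint.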
9753
9754
9755VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9756                                                  const VkAllocationCallbacks *pAllocator,
9757                                                  VkShaderModule *pShaderModule) {
9758    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9759    bool skip_call = false;
9760
9761    /* Use the SPIRV-Tools validator to try to catch any issues with the module itself */
9762    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9763    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };  // codeSize is in bytes; the binary length is in 32-bit words
9764    spv_diagnostic diag = nullptr;
9765
9766    auto result = spvValidate(ctx, &binary, &diag);
9767    if (result != SPV_SUCCESS) {
9768        skip_call |= log_msg(my_data->report_data,
9769                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9770                             VkDebugReportObjectTypeEXT(0), 0,
9771                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
9772                             diag && diag->error ? diag->error : "(no error text)");
9773    }
9774
9775    spvDiagnosticDestroy(diag);
9776    spvContextDestroy(ctx);
9777
9778    if (skip_call)
9779        return VK_ERROR_VALIDATION_FAILED_EXT;
9780
9781    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9782
9783    if (res == VK_SUCCESS) {
9784        std::lock_guard<std::mutex> lock(global_lock);
9785        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9786    }
9787    return res;
9788}
9789
9790static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9791    bool skip_call = false;
9792    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9793        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9794                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9795                             "CreateRenderPass: %s attachment %d cannot be greater than the total number of attachments %d.",
9796                             type, attachment, attachment_count);
9797    }
9798    return skip_call;
9799}
9800
9801static bool IsPowerOfTwo(unsigned x) {
9802    return x && !(x & (x-1));
9803}
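
// Example: IsPowerOfTwo(4) is true, IsPowerOfTwo(6) is false. Each
// VkSampleCountFlagBits value is a single bit, so OR-ing the sample counts of
// a subpass's attachments (below) stays a power of two exactly when all of
// them agree.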
9804
9805static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9806    bool skip_call = false;
9807    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9808        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9809        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9810            skip_call |=
9811                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9812                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9813                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9814        }
9815        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9816            uint32_t attachment = subpass.pPreserveAttachments[j];
9817            if (attachment == VK_ATTACHMENT_UNUSED) {
9818                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9819                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9820                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9821            } else {
9822                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9823            }
9824        }
9825
9826        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
9827            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9828            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9829
9830        unsigned sample_count = 0;
9831
9832        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9833            uint32_t attachment;
9834            if (subpass.pResolveAttachments) {
9835                attachment = subpass.pResolveAttachments[j].attachment;
9836                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9837
9838                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
9839                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9840                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9841                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9842                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
9843                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
9844                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
9845                }
9846            }
9847            attachment = subpass.pColorAttachments[j].attachment;
9848            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9849
9850            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9851                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9852
9853                if (subpass_performs_resolve &&
9854                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9855                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9856                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9857                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
9858                                         "which has VK_SAMPLE_COUNT_1_BIT",
9859                                         i, attachment);
9860                }
9861            }
9862        }
9863
9864        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9865            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9866            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9867
9868            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9869                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9870            }
9871        }
9872
9873        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9874            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9875            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9876        }
9877
9878        if (sample_count && !IsPowerOfTwo(sample_count)) {
9879            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9880                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9881                                 "CreateRenderPass:  Subpass %u attempts to render to "
9882                                 "attachments with inconsistent sample counts",
9883                                 i);
9884        }
9885    }
9886    return skip_call;
9887}
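
// Illustrative resolve setup that the checks above accept (hypothetical
// attachments): a subpass whose pColorAttachments[0] references a
// VK_SAMPLE_COUNT_4_BIT attachment and whose pResolveAttachments[0] references
// a VK_SAMPLE_COUNT_1_BIT attachment. Swapping the two sample counts trips
// both resolve errors above.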
9888
9889VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9890                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9891    bool skip_call = false;
9892    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9893
9894    std::unique_lock<std::mutex> lock(global_lock);
9895
9896    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9897    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9898    //       ValidateLayouts.
9899    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
9900    lock.unlock();
9901
9902    if (skip_call) {
9903        return VK_ERROR_VALIDATION_FAILED_EXT;
9904    }
9905
9906    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9907
9908    if (VK_SUCCESS == result) {
9909        lock.lock();
9910
9911        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9912        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9913        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9914
9915        // Shadow create info and store in map
9916        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9917        if (pCreateInfo->pAttachments) {
9918            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9919            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9920                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9921        }
9922        if (pCreateInfo->pSubpasses) {
9923            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9924            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9925
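            // Deep-copy each subpass's attachment references into one contiguous
            // allocation, laid out as
            //   [ input | color | resolve (optional) | depth/stencil (optional) | preserve ]
            // with the reference pointers re-aimed into it; deleteRenderPasses()
            // later frees the block through the first non-null attachment pointer.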
9926            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9927                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9928                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9929                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9930                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9931                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9932
9933                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9934                subpass->pInputAttachments = attachments;
9935                attachments += subpass->inputAttachmentCount;
9936
9937                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9938                subpass->pColorAttachments = attachments;
9939                attachments += subpass->colorAttachmentCount;
9940
9941                if (subpass->pResolveAttachments) {
9942                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9943                    subpass->pResolveAttachments = attachments;
9944                    attachments += subpass->colorAttachmentCount;
9945                }
9946
9947                if (subpass->pDepthStencilAttachment) {
9948                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9949                    subpass->pDepthStencilAttachment = attachments;
9950                    attachments += 1;
9951                }
9952
9953                memcpy(attachments, subpass->pPreserveAttachments, sizeof(attachments[0]) * subpass->preserveAttachmentCount);
9954                subpass->pPreserveAttachments = &attachments->attachment;
9955            }
9956        }
9957        if (pCreateInfo->pDependencies) {
9958            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9959            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9960                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9961        }
9962
9963        auto render_pass = new RENDER_PASS_NODE(localRPCI);
9964        render_pass->renderPass = *pRenderPass;
9965        render_pass->hasSelfDependency = has_self_dependency;
9966        render_pass->subpassToNode = subpass_to_node;
9967#if MTMERGESOURCE
9968        // MTMTODO : Merge with code from above to eliminate duplication
9969        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9970            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9971            MT_PASS_ATTACHMENT_INFO pass_info;
9972            pass_info.load_op = desc.loadOp;
9973            pass_info.store_op = desc.storeOp;
9974            pass_info.stencil_load_op = desc.stencilLoadOp;
9975            pass_info.stencil_store_op = desc.stencilStoreOp;
9976            pass_info.attachment = i;
9977            render_pass->attachments.push_back(pass_info);
9978        }
9979        // TODO: Maybe fill list and then copy instead of locking
9980        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
9981        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
9982        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9983            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9984            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9985                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9986                if (!attachment_first_read.count(attachment)) {
9987                    attachment_first_read.insert(std::make_pair(attachment, false));
9988                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9989                }
9990            }
9991            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9992                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9993                if (!attachment_first_read.count(attachment)) {
9994                    attachment_first_read.insert(std::make_pair(attachment, false));
9995                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9996                }
9997            }
9998            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9999                uint32_t attachment = subpass.pInputAttachments[j].attachment;
10000                if (!attachment_first_read.count(attachment)) {
10001                    attachment_first_read.insert(std::make_pair(attachment, true));
10002                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10003                }
10004            }
10005        }
10006#endif
10007        dev_data->renderPassMap[*pRenderPass] = render_pass;
10008    }
10009    return result;
10010}
10011
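// Note: the render pass creation path above shadows the application's VkRenderPassCreateInfo so the
// layer can keep referencing it after the call returns; all attachment references of a subpass are
// packed into a single new[] block. The teardown below mirrors that allocation scheme.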
10012// Free the renderpass shadow
10013static void deleteRenderPasses(layer_data *my_data) {
10014    for (auto renderPass : my_data->renderPassMap) {
10015        const VkRenderPassCreateInfo *pRenderPassInfo = renderPass.second->pCreateInfo;
10016        delete[] pRenderPassInfo->pAttachments;
10017        if (pRenderPassInfo->pSubpasses) {
10018            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a single block, so we just need to
                //  find the first non-null pointer to delete
10021                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
10022                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
10023                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
10024                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
10025                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
10026                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
10027                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
10028                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
10029                }
10030            }
10031            delete[] pRenderPassInfo->pSubpasses;
10032        }
10033        delete[] pRenderPassInfo->pDependencies;
10034        delete pRenderPassInfo;
10035        delete renderPass.second;
10036    }
10037    my_data->renderPassMap.clear();
10038}
10039
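// Verify that for each framebuffer attachment the render pass initialLayout either matches the layout
// last recorded for that subresource in this command buffer, or is VK_IMAGE_LAYOUT_UNDEFINED. Any
// subresource with no recorded layout is simply seeded with the initial layout.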
10040static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10041    bool skip_call = false;
10042    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
10043    const safe_VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
10044    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
10045        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10046                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
10047                                                                 "with a different number of attachments.");
10048    }
10049    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10050        const VkImageView &image_view = framebufferInfo.pAttachments[i];
10051        auto view_state = getImageViewState(dev_data, image_view);
10052        assert(view_state);
10053        const VkImage &image = view_state->create_info.image;
10054        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
10055        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
10056                                             pRenderPassInfo->pAttachments[i].initialLayout};
10057        // TODO: Do not iterate over every possibility - consolidate where possible
10058        for (uint32_t j = 0; j < subRange.levelCount; j++) {
10059            uint32_t level = subRange.baseMipLevel + j;
10060            for (uint32_t k = 0; k < subRange.layerCount; k++) {
10061                uint32_t layer = subRange.baseArrayLayer + k;
10062                VkImageSubresource sub = {subRange.aspectMask, level, layer};
10063                IMAGE_CMD_BUF_LAYOUT_NODE node;
10064                if (!FindLayout(pCB, image, sub, node)) {
10065                    SetLayout(pCB, image, sub, newNode);
10066                    continue;
10067                }
10068                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
10069                    newNode.layout != node.layout) {
10070                    skip_call |=
10071                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10072                                DRAWSTATE_INVALID_RENDERPASS, "DS",
10073                                "You cannot start a render pass using attachment %u "
10074                                "where the render pass initial layout is %s and the previous "
10075                                "known layout of the attachment is %s. The layouts must match, or "
10076                                "the render pass initial layout for the attachment must be "
10077                                "VK_IMAGE_LAYOUT_UNDEFINED",
10078                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
10079                }
10080            }
10081        }
10082    }
10083    return skip_call;
10084}
10085
10086static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
10087                                          FRAMEBUFFER_NODE *pFramebuffer,
10088                                          VkAttachmentReference ref)
10089{
10090    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
10091        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
10092        SetLayout(dev_data, pCB, image_view, ref.layout);
10093    }
10094}
10095
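// Transition every attachment referenced by the given subpass to the layout requested by its
// VkAttachmentReference. Invoked whenever a subpass becomes active, i.e. from vkCmdBeginRenderPass()
// (subpass 0) and from vkCmdNextSubpass().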
10096static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
10097                                     const int subpass_index) {
10098    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10099    if (!renderPass)
10100        return;
10101
10102    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10103    if (!framebuffer)
10104        return;
10105
10106    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
10107    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10108        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
10109    }
10110    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10111        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
10112    }
10113    if (subpass.pDepthStencilAttachment) {
10114        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
10115    }
10116}
10117
10118static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
10119    bool skip_call = false;
10120    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10121        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10122                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
10123                             cmd_name.c_str());
10124    }
10125    return skip_call;
10126}
10127
10128static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10129    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10130    if (!renderPass)
10131        return;
10132
10133    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
10134    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10135    if (!framebuffer)
10136        return;
10137
10138    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10139        auto image_view = framebuffer->createInfo.pAttachments[i];
10140        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
10141    }
10142}
10143
10144static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10145    bool skip_call = false;
    // getFramebuffer() can return null for an invalid handle; bail out rather than dereference it
    auto framebuffer = getFramebuffer(my_data, pRenderPassBegin->framebuffer);
    if (!framebuffer) {
        return skip_call;
    }
    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
10147    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10148        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10149        pRenderPassBegin->renderArea.offset.y < 0 ||
10150        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10151        skip_call |= static_cast<bool>(log_msg(
10152            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10153            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with a renderArea that is not within the bounds of the "
10155            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10156            "height %d.",
10157            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10158            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10159    }
10160    return skip_call;
10161}
10162
// For a stencil-only format only the stencil[Load|Store]Op applies; for a depth or color attachment only the
// [load|store]Op applies; for a combined depth/stencil format both ops must be checked
10165// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
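// Example (a sketch for illustration only): for a combined depth/stencil format such as
// VK_FORMAT_D24_UNORM_S8_UINT with loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and
// stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR, calling this helper with
// op == VK_ATTACHMENT_LOAD_OP_CLEAR returns true: both ops are checked for a combined format,
// and the stencil op matches.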
10166template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10167    if (color_depth_op != op && stencil_op != op) {
10168        return false;
10169    }
10170    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10171    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10172
    return ((check_color_depth_load_op && (color_depth_op == op)) ||
            (check_stencil_load_op && (stencil_op == op)));
10175}
10176
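// vkCmdBeginRenderPass: instead of validating attachment memory immediately, a deferred lambda is
// queued on the command buffer (validate_functions) per attachment based on its load op:
//   VK_ATTACHMENT_LOAD_OP_CLEAR     -> mark the image's memory contents valid
//   VK_ATTACHMENT_LOAD_OP_DONT_CARE -> mark the contents invalid
//   VK_ATTACHMENT_LOAD_OP_LOAD      -> require that the contents already be valid
// These lambdas are presumably executed when the command buffer is validated at queue-submit time.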
10177VKAPI_ATTR void VKAPI_CALL
10178CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10179    bool skip_call = false;
10180    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10181    std::unique_lock<std::mutex> lock(global_lock);
10182    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10183    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
10184    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10185    if (cb_node) {
10186        if (renderPass) {
10187            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
10188            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
10189            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
10190                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10191                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
10192                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
10193                                                         renderPass->attachments[i].stencil_load_op,
10194                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10195                    clear_op_size = static_cast<uint32_t>(i) + 1;
10196                    std::function<bool()> function = [=]() {
10197                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10198                        return false;
10199                    };
10200                    cb_node->validate_functions.push_back(function);
10201                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
10202                                                                renderPass->attachments[i].stencil_load_op,
10203                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10204                    std::function<bool()> function = [=]() {
10205                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10206                        return false;
10207                    };
10208                    cb_node->validate_functions.push_back(function);
10209                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
10210                                                                renderPass->attachments[i].stencil_load_op,
10211                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
10212                    std::function<bool()> function = [=]() {
10213                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10214                                                          "vkCmdBeginRenderPass()");
10215                    };
10216                    cb_node->validate_functions.push_back(function);
10217                }
10218                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
10219                    std::function<bool()> function = [=]() {
10220                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10221                                                          "vkCmdBeginRenderPass()");
10222                    };
10223                    cb_node->validate_functions.push_back(function);
10224                }
10225            }
10226            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10227                skip_call |=
10228                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10229                            reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there "
                            "must be at least %u entries in the pClearValues array to account for the highest-indexed attachment "
                            "in renderPass 0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is attachment %u. Note that "
                            "the pClearValues array is indexed by attachment number, so even if some pClearValues entries between "
                            "0 and %u correspond to attachments that aren't cleared, they will simply be ignored.",
                            pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass),
                            clear_op_size - 1, clear_op_size - 1);
10238            }
10239            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10240            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10241            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
10242            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10243            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
10244            skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10245            cb_node->activeRenderPass = renderPass;
10246            // This is a shallow copy as that is all that is needed for now
10247            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10248            cb_node->activeSubpass = 0;
10249            cb_node->activeSubpassContents = contents;
10250            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10251            // Connect this framebuffer and its children to this cmdBuffer
10252            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10253            // transition attachments to the correct layouts for the first subpass
10254            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10255        } else {
10256            skip_call |=
10257                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10258                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
10259        }
10260    }
10261    lock.unlock();
10262    if (!skip_call) {
10263        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10264    }
10265}
10266
10267VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10268    bool skip_call = false;
10269    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10270    std::unique_lock<std::mutex> lock(global_lock);
10271    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10272    if (pCB) {
10273        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
10274        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10275        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
10276
        // Note: outsideRenderPass() above only reports an error, so guard against a null activeRenderPass here
        if (pCB->activeRenderPass) {
            auto subpassCount = pCB->activeRenderPass->pCreateInfo->subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
                            "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
            }
        }
10284    }
10285    lock.unlock();
10286
10287    if (skip_call)
10288        return;
10289
10290    dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
10291
    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
    }
10298}
10299
10300VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10301    bool skip_call = false;
10302    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10303    std::unique_lock<std::mutex> lock(global_lock);
10304    auto pCB = getCBNode(dev_data, commandBuffer);
10305    if (pCB) {
10306        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
10307        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
10308        if (pRPNode) {
10309            if (pCB->activeSubpass != pRPNode->pCreateInfo->subpassCount - 1) {
10310                skip_call |=
10311                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10312                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10313                            "vkCmdEndRenderPass(): Called before reaching final subpass");
10314            }
10315
10316            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
10317                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10318                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
10319                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
10320                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
10321                    std::function<bool()> function = [=]() {
10322                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10323                        return false;
10324                    };
10325                    pCB->validate_functions.push_back(function);
10326                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
10327                                                                pRPNode->attachments[i].stencil_store_op,
10328                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
10329                    std::function<bool()> function = [=]() {
10330                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10331                        return false;
10332                    };
10333                    pCB->validate_functions.push_back(function);
10334                }
10335            }
10336        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
10338        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
10339        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10340    }
10341    lock.unlock();
10342
10343    if (skip_call)
10344        return;
10345
10346    dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
10347
10348    if (pCB) {
10349        lock.lock();
10350        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
10351        pCB->activeRenderPass = nullptr;
10352        pCB->activeSubpass = 0;
10353        pCB->activeFramebuffer = VK_NULL_HANDLE;
10354    }
10355}
10356
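// Render pass compatibility (following the Vulkan spec's rules): corresponding attachments must have
// the same format and sample count and, when either render pass has more than one subpass, the same
// flags. The helpers below compare one attachment reference pair at a time; VK_ATTACHMENT_UNUSED on
// both sides is trivially compatible.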
10357static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10358                                        uint32_t secondaryAttach, const char *msg) {
10359    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10360                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10361                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
10363                   "Attachment %u is not compatible with %u: %s",
10364                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
10365}
10366
10367static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10368                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10369                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10370                                            uint32_t secondaryAttach, bool is_multi) {
10371    bool skip_call = false;
10372    if (primaryPassCI->attachmentCount <= primaryAttach) {
10373        primaryAttach = VK_ATTACHMENT_UNUSED;
10374    }
10375    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10376        secondaryAttach = VK_ATTACHMENT_UNUSED;
10377    }
10378    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10379        return skip_call;
10380    }
10381    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10382        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10383                                                 "The first is unused while the second is not.");
10384        return skip_call;
10385    }
10386    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10387        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10388                                                 "The second is unused while the first is not.");
10389        return skip_call;
10390    }
10391    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10392        skip_call |=
10393            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10394    }
10395    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10396        skip_call |=
10397            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10398    }
10399    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10400        skip_call |=
10401            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10402    }
10403    return skip_call;
10404}
10405
10406static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10407                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10408                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10409    bool skip_call = false;
10410    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10411    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10412    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10413    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10414        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10415        if (i < primary_desc.inputAttachmentCount) {
10416            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10417        }
10418        if (i < secondary_desc.inputAttachmentCount) {
10419            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10420        }
10421        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10422                                                     secondaryPassCI, secondary_input_attach, is_multi);
10423    }
10424    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10425    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10426        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10427        if (i < primary_desc.colorAttachmentCount) {
10428            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10429        }
10430        if (i < secondary_desc.colorAttachmentCount) {
10431            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10432        }
10433        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10434                                                     secondaryPassCI, secondary_color_attach, is_multi);
10435        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10436        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10437            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10438        }
10439        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10440            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10441        }
10442        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10443                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10444    }
10445    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10446    if (primary_desc.pDepthStencilAttachment) {
10447        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10448    }
10449    if (secondary_desc.pDepthStencilAttachment) {
10450        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10451    }
10452    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10453                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10454    return skip_call;
10455}
10456
10457// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10458//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10459//  will then feed into this function
10460static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10461                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10462                                            VkRenderPassCreateInfo const *secondaryPassCI) {
10463    bool skip_call = false;
10464
10465    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10466        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10467                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10468                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                             " whose subpassCount of %u is incompatible with the primary Cmd Buffer 0x%" PRIx64
                             " whose subpassCount is %u.",
10471                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10472                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10473    } else {
10474        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10475            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10476                                                      primaryPassCI->subpassCount > 1);
10477        }
10478    }
10479    return skip_call;
10480}
10481
10482static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10483                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10484    bool skip_call = false;
10485    if (!pSubCB->beginInfo.pInheritanceInfo) {
10486        return skip_call;
10487    }
10488    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10489    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10490    if (secondary_fb != VK_NULL_HANDLE) {
10491        if (primary_fb != secondary_fb) {
10492            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10493                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10494                                 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10495                                 " which has a framebuffer 0x%" PRIx64
10496                                 " that is not the same as the primaryCB's current active framebuffer 0x%" PRIx64 ".",
10497                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10498                                 reinterpret_cast<uint64_t &>(primary_fb));
10499        }
10500        auto fb = getFramebuffer(dev_data, secondary_fb);
10501        if (!fb) {
10502            skip_call |=
10503                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10504                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10505                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
10506                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10507            return skip_call;
10508        }
        auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        // getRenderPass() can return null for an invalid handle; guard before dereferencing
        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                         cb_renderpass->pCreateInfo);
        }
10514    }
10515    return skip_call;
10516}
10517
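// Validate query-related constraints for executing a secondary command buffer: pipeline-statistics
// queries active on the primary must be compatible with the secondary's inherited pipelineStatistics,
// the secondary must not have started a query of a type already active on the primary, and both
// buffers must come from command pools created for the same queue family.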
10518static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10519    bool skip_call = false;
10520    unordered_set<int> activeTypes;
10521    for (auto queryObject : pCB->activeQueries) {
10522        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10523        if (queryPoolData != dev_data->queryPoolMap.end()) {
10524            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10525                pSubCB->beginInfo.pInheritanceInfo) {
10526                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10527                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10528                    skip_call |= log_msg(
10529                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10530                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10531                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried, so the "
                        "command buffer must have all of the query pool's pipelineStatistics bits set.",
10534                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10535                }
10536            }
10537            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10538        }
10539    }
10540    for (auto queryObject : pSubCB->startedQueries) {
10541        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10542        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10543            skip_call |=
10544                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10545                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10546                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
10548                        "secondary Cmd Buffer 0x%p.",
10549                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10550                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10551        }
10552    }
10553
10554    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10555    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10556    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10557        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10558                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10559                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10560                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10561                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10562                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10563    }
10564
10565    return skip_call;
10566}
10567
10568VKAPI_ATTR void VKAPI_CALL
10569CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10570    bool skip_call = false;
10571    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10572    std::unique_lock<std::mutex> lock(global_lock);
10573    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10574    if (pCB) {
10575        GLOBAL_CB_NODE *pSubCB = NULL;
10576        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10577            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10578            if (!pSubCB) {
10579                skip_call |=
10580                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10581                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10582                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10583                            (void *)pCommandBuffers[i], i);
10584            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10585                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10586                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10587                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10588                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
10589                                     (void *)pCommandBuffers[i], i);
10590            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10591                auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10592                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10593                    skip_call |= log_msg(
10594                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10595                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10596                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10597                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10598                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10599                } else {
                    // Make sure render pass is compatible with parent command buffer pass if it has the continue bit set
10601                    if (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) {
10602                        skip_call |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->pCreateInfo,
10603                                                                    pCommandBuffers[i], secondary_rp_node->pCreateInfo);
10604                    }
10605                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10606                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10607                }
10608                string errorString = "";
10609                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10610                if ((pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) &&
10611                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->pCreateInfo, secondary_rp_node->pCreateInfo,
10612                                                     errorString)) {
10613                    skip_call |= log_msg(
10614                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10615                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10616                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10617                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10618                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10619                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10620                }
10621            }
10622            // TODO(mlentine): Move more logic into this method
10623            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10624            skip_call |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution from the moment
            // they are recorded
10627            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10628                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10629                    skip_call |= log_msg(
10630                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10631                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10632                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10633                        "set!",
10634                        (uint64_t)(pCB->commandBuffer));
10635                }
10636                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10637                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10638                    skip_call |= log_msg(
10639                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10640                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10641                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
10642                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10643                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10644                        "set, even though it does.",
10645                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10646                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10647                }
10648            }
10649            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
10650                skip_call |=
10651                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10652                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10653                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10654                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight when inherited queries are not "
10656                            "supported on this device.",
10657                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10658            }
10659            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10660            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10661            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10662            for (auto &function : pSubCB->queryUpdates) {
10663                pCB->queryUpdates.push_back(function);
10664            }
10665        }
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10668    }
10669    lock.unlock();
10670    if (!skip_call)
10671        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10672}
10673
10674// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10675static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10676                                    VkDeviceSize end_offset) {
10677    bool skip_call = false;
10678    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10679    // Iterate over all bound image ranges and verify that for any that overlap the
10680    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10681    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10682    for (auto image_handle : mem_info->bound_images) {
10683        auto img_it = mem_info->bound_ranges.find(image_handle);
10684        if (img_it != mem_info->bound_ranges.end()) {
10685            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10686                std::vector<VkImageLayout> layouts;
10687                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10688                    for (auto layout : layouts) {
10689                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10690                            skip_call |=
10691                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10692                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10693                                                                                        "GENERAL or PREINITIALIZED are supported.",
10694                                        string_VkImageLayout(layout));
10695                        }
10696                    }
10697                }
10698            }
10699        }
10700    }
10701    return skip_call;
10702}
10703
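// vkMapMemory: beyond dispatching to the driver, verify the memory type is host-visible, check that
// any images bound inside the mapped range are in GENERAL or PREINITIALIZED layout, and record the
// mapped range so later flush/invalidate calls can be validated against it.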
10704VKAPI_ATTR VkResult VKAPI_CALL
10705MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10706    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10707
10708    bool skip_call = false;
10709    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10710    std::unique_lock<std::mutex> lock(global_lock);
10711    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10712    if (mem_info) {
        // TODO : This could be more fine-grained, tracking just the region that is valid
10714        mem_info->global_valid = true;
10715        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10716        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10717        // TODO : Do we need to create new "bound_range" for the mapped range?
10718        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10719        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10720             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call |=
10722                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10723                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10724                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10725        }
10726    }
10727    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10728    lock.unlock();
10729
10730    if (!skip_call) {
10731        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10732        if (VK_SUCCESS == result) {
10733            lock.lock();
10734            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10735            storeMemRanges(dev_data, mem, offset, size);
10736            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10737            lock.unlock();
10738        }
10739    }
10740    return result;
10741}
10742
10743VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10744    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10745    bool skip_call = false;
10746
10747    std::unique_lock<std::mutex> lock(global_lock);
10748    skip_call |= deleteMemRanges(my_data, mem);
10749    lock.unlock();
10750    if (!skip_call) {
10751        my_data->device_dispatch_table->UnmapMemory(device, mem);
10752    }
10753}
10754
10755static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10756                                   const VkMappedMemoryRange *pMemRanges) {
10757    bool skip_call = false;
10758    for (uint32_t i = 0; i < memRangeCount; ++i) {
10759        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
10760        if (mem_info) {
10761            if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10762                skip_call |=
10763                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10764                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10765                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10766                            "(" PRINTF_SIZE_T_SPECIFIER ").",
10767                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10768            }
10769
            // End of the application's mapped range, accounting for VK_WHOLE_SIZE mappings
            const uint64_t mem_range_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                               ? mem_info->alloc_info.allocationSize
                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
            if (pMemRanges[i].size != VK_WHOLE_SIZE && (mem_range_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
10774                skip_call |= log_msg(
10775                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10776                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10777                    "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER ") exceeds the Memory Object's upper-bound "
10778                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(mem_range_end));
10780            }
10781        }
10782    }
10783    return skip_call;
10784}
10785
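// For non-coherent memory the layer keeps a shadow copy with guard padding on both sides of the
// user-visible bytes (layout inferred from the shadow_pad_size arithmetic below):
//   [ pad: shadow_pad_size ][ user data: size ][ pad: shadow_pad_size ]
// Both pads are filled with NoncoherentMemoryFillValue, so any modified guard byte indicates the app
// wrote outside its mapped range: an underflow before the data or an overflow after it.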
10786static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10787                                                     const VkMappedMemoryRange *pMemRanges) {
10788    bool skip_call = false;
10789    for (uint32_t i = 0; i < memRangeCount; ++i) {
10790        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
10791        if (mem_info) {
10792            if (mem_info->shadow_copy) {
10793                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10794                                        ? mem_info->mem_range.size
10795                                        : (mem_info->alloc_info.allocationSize - pMemRanges[i].offset);
10796                char *data = static_cast<char *>(mem_info->shadow_copy);
10797                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10798                    if (data[j] != NoncoherentMemoryFillValue) {
10799                        skip_call |= log_msg(
10800                            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10801                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10802                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10803                    }
10804                }
10805                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10806                    if (data[j] != NoncoherentMemoryFillValue) {
10807                        skip_call |= log_msg(
10808                            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10809                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10810                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10811                    }
10812                }
10813                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10814            }
10815        }
10816    }
10817    return skip_call;
10818}
10819
10820static void CopyNoncoherentMemoryFromDriver(layer_data *my_data, uint32_t memory_range_count,
10821                                            const VkMappedMemoryRange *mem_ranges) {
10822    for (uint32_t i = 0; i < memory_range_count; ++i) {
10823        auto mem_info = getMemObjInfo(my_data, mem_ranges[i].memory);
10824        if (mem_info && mem_info->shadow_copy) {
10825            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10826                                    ? mem_info->mem_range.size
10827                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
10828            char *data = static_cast<char *>(mem_info->shadow_copy);
10829            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
10830        }
10831    }
10832}
10833
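// Flush pushes the shadow copy down to the driver mapping (after the guard-byte checks above), while
// Invalidate, below, refreshes the shadow from the driver so later flushes start from current data.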
VkResult VKAPI_CALL
FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skip_call |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skip_call) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

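// vkInvalidateMappedMemoryRanges: validate that the ranges are actually mapped, then,
// after the driver call succeeds, refresh the shadow copy with the driver's contents so
// that subsequent flush-time guard-band checks compare against current data.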
VkResult VKAPI_CALL
InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skip_call) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
        // Update our shadow copy with modified driver data
        CopyNoncoherentMemoryFromDriver(my_data, memRangeCount, pMemRanges);
    }
    return result;
}

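// Bind device memory to an image: record the object<->memory binding, track the bound
// range against the image's memory requirements (linear vs. optimal tiling is passed
// through for the range bookkeeping), and validate the memory type bits. The image
// node's mem/memOffset/memSize are only updated once the driver call is issued.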
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        // Track objects tied to memory
        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
        skip_call = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
        VkMemoryRequirements memRequirements;
        lock.unlock();
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        lock.lock();

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
                                                image_node->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
        }

        print_mem_list(dev_data);
        lock.unlock();
        if (!skip_call) {
            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
            lock.lock();
            image_node->mem = mem;
            image_node->memOffset = memoryOffset;
            image_node->memSize = memRequirements.size;
            lock.unlock();
        }
    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been deleted?",
                reinterpret_cast<const uint64_t &>(image));
    }
    return result;
}

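// vkSetEvent from the host: mark the event signaled with a HOST stage mask, and warn if
// a command buffer still has a pending write to it (a forward-progress hazard).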
VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_node = getEventNode(dev_data, event);
    if (event_node) {
        event_node->needsSignaled = false;
        event_node->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_node->write_in_use) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
                                 reinterpret_cast<const uint64_t &>(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip_call)
        result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

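// vkQueueBindSparse behaves like a queue submission for synchronization purposes:
// sparse bindings are recorded as memory bindings, and each VkBindSparseInfo becomes a
// queue submission entry (with no command buffers) carrying its semaphore waits and
// signals, with the fence attached to the final batch.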
VKAPI_ATTR VkResult VKAPI_CALL
QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pFence = getFenceNode(dev_data, fence);
    auto pQueue = getQueueNode(dev_data, queue);

    // First verify that fence is not in use
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (pFence) {
        SubmitFence(pQueue, pFence, bindInfoCount);
    }

    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }

        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
                                " that has no way to be signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                ", but that semaphore is already signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
                                         semaphore_waits,
                                         semaphore_signals,
                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !bindInfoCount) {
        // No work to do, just dropping a fence in the queue by itself.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
                                         std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(),
                                         fence);
    }

    print_mem_list(dev_data);
    lock.unlock();

    if (!skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}

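// Track new semaphores as unsignaled with no pending signaler.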
VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaler.first = VK_NULL_HANDLE;
        sNode->signaler.second = 0;
        sNode->signaled = false;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].write_in_use = 0;
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
    }

    return result;
}

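// Destroying a swapchain retires all of its images: their layout and subresource
// tracking entries, object bindings, and IMAGE_NODE proxies are removed before the
// swapchain node itself is erased.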
VKAPI_ATTR void VKAPI_CALL
DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                skip_call |=
                    clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageMap.erase(swapchain_image);
            }
        }
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

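// Swapchain images are created by the WSI implementation rather than vkCreateImage, so
// this wrapper synthesizes IMAGE_NODE proxies for them (with a reconstructed
// VkImageCreateInfo) and seeds their layout tracking at VK_IMAGE_LAYOUT_UNDEFINED.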
VKAPI_ATTR VkResult VKAPI_CALL
GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        std::lock_guard<std::mutex> lock(global_lock);
        const size_t count = *pCount;
        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
        if (swapchain_node && !swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching image data on a subsequent call",
                        (uint64_t)(swapchain));
            }
        }
        // Guard the population loop: swapchain_node may legitimately be null if the
        // swapchain was never tracked, and everything below dereferences it.
        if (swapchain_node) {
            for (uint32_t i = 0; i < *pCount; ++i) {
                IMAGE_LAYOUT_NODE image_layout_node;
                image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
                image_layout_node.format = swapchain_node->createInfo.imageFormat;
                // Add imageMap entries for each swapchain image
                VkImageCreateInfo image_ci = {};
                image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
                image_ci.imageType = VK_IMAGE_TYPE_2D; // swapchain images are always 2D
                image_ci.mipLevels = 1;
                image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
                image_ci.usage = swapchain_node->createInfo.imageUsage;
                image_ci.format = swapchain_node->createInfo.imageFormat;
                image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
                image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
                image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
                image_ci.extent.depth = 1;
                image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
                dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci));
                auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
                image_node->valid = false;
                image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
                swapchain_node->images.push_back(pSwapchainImages[i]);
                ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
                dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
                dev_data->imageLayoutMap[subpair] = image_layout_node;
                dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
            }
        }
    }
    return result;
}

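// vkQueuePresentKHR: require that each wait semaphore can actually be signaled, that
// presented image contents are valid, and that every presented image is in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.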
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;

    std::lock_guard<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
            VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            skip_call |= ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, image), "vkQueuePresentKHR()");
            vector<VkImageLayout> layouts;
            if (FindLayouts(dev_data, image, layouts)) {
                for (auto layout : layouts) {
                    if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                        skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Image passed to present must be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR "
                                        "layout, but is in %s.",
                                        string_VkImageLayout(layout));
                    }
                }
            }
        }
    }

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    VkResult result =
        dev_data->device_dispatch_table->CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
    return result;
}

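// vkAcquireNextImageKHR: the semaphore handed in must not already be signaled and the
// fence must not be in flight; on success both are marked signaled/in-flight with no
// queue signaler, since acquisition happens off-queue and cannot participate in a
// completion proof.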
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->signaled) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
    }

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);

    lock.lock();
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        if (pFence) {
            pFence->state = FENCE_INFLIGHT;
            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
        }

        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
        if (pSemaphore) {
            pSemaphore->signaled = true;
            pSemaphore->signaler.first = VK_NULL_HANDLE;
        }
    }
    lock.unlock();

    return result;
}

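// Enumeration follows the usual two-call idiom: first call with NULL pPhysicalDevices
// to get a count (QUERY_COUNT), then with an array to fetch handles (QUERY_DETAILS).
// Skipping the count query, or passing a stale count, draws a warning. An illustrative
// well-behaved call sequence from an application (a sketch, not layer code):
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, NULL);          // QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(instance, &count, gpus.data());   // QUERY_DETAILS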
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip_call = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    if (my_data->instance_state) {
        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
        if (NULL == pPhysicalDevices) {
            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
        } else {
            if (UNCALLED == my_data->instance_state->vkEnumeratePhysicalDevicesState) {
                // Flag warning here. You can call this without having queried the count, but it may not be
                // robust on platforms with multiple physical devices.
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                                    0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                    "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
                                    "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
            }
            // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
            else if (my_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
                // Having actual count match count from app is not a requirement, so this can be a warning
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
                                    "supported by this instance is %u.",
                                    *pPhysicalDeviceCount, my_data->instance_state->physical_devices_count);
            }
            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        VkResult result =
            my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
        if (NULL == pPhysicalDevices) {
            my_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
        } else if (result == VK_SUCCESS) { // Save physical devices
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map);
                phy_dev_data->physical_device_state = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE());
                // Init actual features for each physical device
                my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i],
                                                                            &phy_dev_data->physical_device_features);
            }
        }
        return result;
    } else {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
                (uint64_t)instance);
    }
    return VK_ERROR_VALIDATION_FAILED_EXT;
}

VKAPI_ATTR void VKAPI_CALL
GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
    VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip_call = false;
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    if (phy_dev_data->physical_device_state) {
        if (NULL == pQueueFamilyProperties) {
            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
            // get count
            if (UNCALLED == phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
                    "NULL pQueueFamilyProperties to query pCount.");
            }
            // Then verify that pCount that is passed in on second call matches what was returned
            if (phy_dev_data->physical_device_state->queueFamilyPropertiesCount != *pCount) {
                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
                // provide as warning
                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
                    "supported by this physicalDevice is %u.",
                    *pCount, phy_dev_data->physical_device_state->queueFamilyPropertiesCount);
            }
            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return;
        }
        phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount,
            pQueueFamilyProperties);
        if (NULL == pQueueFamilyProperties) {
            phy_dev_data->physical_device_state->queueFamilyPropertiesCount = *pCount;
        } else { // Save queue family properties
            phy_dev_data->queue_family_properties.reserve(*pCount);
            for (uint32_t i = 0; i < *pCount; i++) {
                phy_dev_data->queue_family_properties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i]));
            }
        }
        return;
    } else {
        log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
            __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
            "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
            (uint64_t)physicalDevice);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

11413
11414VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
11415                                                         VkDebugReportCallbackEXT msgCallback,
11416                                                         const VkAllocationCallbacks *pAllocator) {
11417    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11418    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
11419    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11420    std::lock_guard<std::mutex> lock(global_lock);
11421    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
11422}
11423
VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

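// GetInstanceProcAddr/GetDeviceProcAddr route through three intercept tables in order:
// core instance commands, core device commands, then WSI swapchain commands (the last
// gated on which WSI extensions the device actually enabled). Anything not intercepted
// falls through to the next layer's dispatch table.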
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (proc)
        return proc;

    assert(instance);

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (proc)
        return proc;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}

static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}
