core_validation.cpp revision df1846b33487b02061c0ff42d768588a9abeb6c7
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)     \
    {                       \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif

using namespace std;

// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
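
// Illustrative sketch (not part of the layer; surrounding values assumed): an
// application enabling both layers must list this layer ahead of
// VK_LAYER_GOOGLE_unique_objects so the ordering check above passes:
//
//     char const *layers[] = {"VK_LAYER_LUNARG_core_validation",
//                             "VK_LAYER_GOOGLE_unique_objects"};
//     VkInstanceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     ci.enabledLayerCount = 2;
//     ci.ppEnabledLayerNames = layers;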

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
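
// Illustrative sketch (variable names assumed): decoding instructions with the
// iterator above. Each advance skips len() words, i.e. one whole instruction:
//
//     for (auto insn : module) {        // shader_module provides begin()/end()
//         uint32_t op = insn.opcode();  // low 16 bits of the first word
//         uint32_t wc = insn.len();     // high 16 bits: word count
//     }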
229
230struct shader_module {
231    /* the spirv image itself */
232    vector<uint32_t> words;
233    /* a mapping of <id> to the first word of its def. this is useful because walking type
234     * trees, constant expressions, etc requires jumping all over the instruction stream.
235     */
236    unordered_map<unsigned, unsigned> def_index;
237
238    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
239        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
240          def_index() {
241
242        build_def_index(this);
243    }
244
245    /* expose begin() / end() to enable range-based for */
246    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
247    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
248    /* given an offset into the module, produce an iterator there. */
249    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }
250
251    /* gets an iterator to the definition of an id */
252    spirv_inst_iter get_def(unsigned id) const {
253        auto it = def_index.find(id);
254        if (it == def_index.end()) {
255            return end();
256        }
257        return at(it->second);
258    }
259};
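
// Note: a SPIR-V binary starts with a five-word header (magic, version,
// generator, id bound, schema), which is why begin() above skips the first
// five words. A minimal use of get_def() (names assumed):
//
//     auto def = module->get_def(type_id);  // iterator at the defining insn
//     if (def != module->end()) { /* e.g. def.opcode() == spv::OpTypeInt */ }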

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else VK_NULL_HANDLE
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view create info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

320
321FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
322    auto it = dev_data->fenceMap.find(fence);
323    if (it == dev_data->fenceMap.end()) {
324        return nullptr;
325    }
326    return &it->second;
327}
328
329QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
330    auto it = dev_data->queueMap.find(queue);
331    if (it == dev_data->queueMap.end()) {
332        return nullptr;
333    }
334    return &it->second;
335}
336
337static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
338    switch (type) {
339    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
340        auto img_node = getImageNode(my_data, VkImage(handle));
341        if (img_node)
342            return &img_node->mem;
343        break;
344    }
345    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
346        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
347        if (buff_node)
348            return &buff_node->mem;
349        break;
350    }
351    default:
352        break;
353    }
354    return nullptr;
355}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
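
// Worked example of the check above, for a buffer created with
// usage == VK_BUFFER_USAGE_TRANSFER_SRC_BIT:
//   desired == TRANSFER_SRC, either strictness          -> passes
//   desired == TRANSFER_SRC | TRANSFER_DST, strict      -> fails (DST missing)
//   desired == TRANSFER_SRC | TRANSFER_DST, non-strict  -> passes (overlap)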

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = getImageNode(dev_data, image);
    if (image_node) {
        skipCall = validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        skipCall = validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = getImageNode(dev_data, image);
        if (image_node && !image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = getImageNode(dev_data, image);
        if (image_node) {
            image_node->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                           ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO : More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}
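
// Sketch of a typical call site (parameter names assumed): a memory-binding
// entry point such as vkBindBufferMemory would record the new binding with
//
//     skipCall |= set_mem_binding(dev_data, mem, (uint64_t)buffer,
//                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
//                                 "vkBindBufferMemory");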

// For NULL mem case, clear any previous binding. Otherwise:
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->objBindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                        const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object 0x%" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->commandBufferBindings.size() + mem_info->objBindings.size());
        if (0 != mem_info->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->objBindings.size());
        if (mem_info->objBindings.size() > 0) {
            for (auto obj : mem_info->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->commandBufferBindings.size());
        if (mem_info->commandBufferBindings.size() > 0) {
            for (auto cb : mem_info->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
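
// Worked example (ids assumed): for a module containing
//     %2 = OpTypeInt 32 1
// the loop above records def_index[2] = <word offset of that OpTypeInt>, so a
// later shader_module::get_def(2) lands on the instruction without rescanning
// the whole word stream.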

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
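
// Note on the word layout used above: in OpEntryPoint, word 1 is the
// ExecutionModel, word 2 the entry point <id>, and the literal name starts at
// word 3. The shift 1u << insn.word(1) works because the ExecutionModel
// values (Vertex = 0, TessellationControl = 1, ...) match the bit positions
// of the corresponding VkShaderStageFlagBits values (0x1, 0x2, ...).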

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
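
// Note: in OpConstant, word 1 is the result type, word 2 the result <id>, and
// word 3 holds the literal value for types of 32 bits or fewer, which is what
// word(3) reads above. Wider constants span additional words and are not
// handled here.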

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
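
// Example output (illustrative): for a uniform block declared in GLSL as
//     layout(binding = 0) uniform U { vec4 v; };
// describe_type() on the variable's pointer type produces roughly
//     "ptr to uniform struct of (vec4 of float32)".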

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
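
// Worked example: with relaxed == true, a vec4 of float32 in module `a`
// matches a vec3 of float32 in module `b`, because the vector case accepts
// a_insn.word(3) >= b_insn.word(3); a vector in `a` may also collapse to a
// narrow scalar in `b`. This lets a wider output satisfy a narrower input.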

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
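
// Worked examples of the arithmetic above:
//   vec4 of float32: (32 * 4 + 127) / 128 = 1 location
//   vec3 of float64 (dvec3): (64 * 3 + 127) / 128 = 2 locations
//   mat4 (4 columns, each a vec4 of float32): 4 * 1 = 4 locations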

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};
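
// Note on the table above: tessellation and geometry stages declare their
// per-vertex inputs as arrays (one element per vertex of the input primitive),
// and tessellation control outputs are likewise arrayed, which is why
// interface matching strips one level of arrayness for those stages.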

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {

        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}
1292
1293static void collect_interface_block_members(shader_module const *src,
1294                                            std::map<location_t, interface_var> &out,
1295                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1296                                            uint32_t id, uint32_t type_id, bool is_patch) {
1297    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1298    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1299    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1300        /* this isn't an interface block. */
1301        return;
1302    }
1303
1304    std::unordered_map<unsigned, unsigned> member_components;
1305
1306    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1307    for (auto insn : *src) {
1308        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1309            unsigned member_index = insn.word(2);
1310
1311            if (insn.word(3) == spv::DecorationComponent) {
1312                unsigned component = insn.word(4);
1313                member_components[member_index] = component;
1314            }
1315        }
1316    }
1317
1318    /* Second pass -- produce the output, from Location decorations */
1319    for (auto insn : *src) {
1320        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1321            unsigned member_index = insn.word(2);
1322            unsigned member_type_id = type.word(2 + member_index);
1323
1324            if (insn.word(3) == spv::DecorationLocation) {
1325                unsigned location = insn.word(4);
1326                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1327                auto component_it = member_components.find(member_index);
1328                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1329
1330                for (unsigned int offset = 0; offset < num_locations; offset++) {
1331                    interface_var v;
1332                    v.id = id;
1333                    /* TODO: member index in interface_var too? */
1334                    v.type_id = member_type_id;
1335                    v.offset = offset;
1336                    v.is_patch = is_patch;
1337                    v.is_block_member = true;
1338                    out[std::make_pair(location + offset, component)] = v;
1339                }
1340            }
1341        }
1342    }
1343}
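
/* Illustrative SPIR-V (assumed disassembly): for a member declared
 * "OpMemberDecorate %Block 1 Location 4", word(1) is %Block, word(2) the
 * member index 1, word(3) DecorationLocation, and word(4) the literal 4 --
 * exactly the words the two passes above pick apart. */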
1344
1345static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1346                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1347                                          bool is_array_of_verts) {
1348    std::unordered_map<unsigned, unsigned> var_locations;
1349    std::unordered_map<unsigned, unsigned> var_builtins;
1350    std::unordered_map<unsigned, unsigned> var_components;
1351    std::unordered_map<unsigned, unsigned> blocks;
1352    std::unordered_map<unsigned, unsigned> var_patch;
1353
1354    for (auto insn : *src) {
1355
1356        /* We consider two interface models: SSO rendezvous-by-location, and
1357         * builtins. Complain about anything that fits neither model.
1358         */
1359        if (insn.opcode() == spv::OpDecorate) {
1360            if (insn.word(2) == spv::DecorationLocation) {
1361                var_locations[insn.word(1)] = insn.word(3);
1362            }
1363
1364            if (insn.word(2) == spv::DecorationBuiltIn) {
1365                var_builtins[insn.word(1)] = insn.word(3);
1366            }
1367
1368            if (insn.word(2) == spv::DecorationComponent) {
1369                var_components[insn.word(1)] = insn.word(3);
1370            }
1371
1372            if (insn.word(2) == spv::DecorationBlock) {
1373                blocks[insn.word(1)] = 1;
1374            }
1375
1376            if (insn.word(2) == spv::DecorationPatch) {
1377                var_patch[insn.word(1)] = 1;
1378            }
1379        }
1380    }
1381
1382    /* TODO: handle grouped decorations */
1383    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1384     * have the same location, and we DON'T want to clobber. */
1385
1386    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1387       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1388       the word to determine which word contains the terminator. */
1389    uint32_t word = 3;
1390    while (entrypoint.word(word) & 0xff000000u) {
1391        ++word;
1392    }
1393    ++word;
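
    /* Example (illustrative): in OpEntryPoint Fragment %main "main" %in %out,
     * the literal "main" fills word 3 completely ('m','a','i','n'), so its
     * terminator lands in word 4; the scan above stops there, and the
     * interface ids %in and %out begin at word 5. */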
1394
1395    for (; word < entrypoint.len(); word++) {
1396        auto insn = src->get_def(entrypoint.word(word));
1397        assert(insn != src->end());
1398        assert(insn.opcode() == spv::OpVariable);
1399
1400        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1401            unsigned id = insn.word(2);
1402            unsigned type = insn.word(1);
1403
1404            int location = value_or_default(var_locations, id, -1);
1405            int builtin = value_or_default(var_builtins, id, -1);
1406            unsigned component = value_or_default(var_components, id, 0); /* an unspecified component is OK; it defaults to 0 */
1407            bool is_patch = var_patch.find(id) != var_patch.end();
1408
1409            /* All variables and interface block members in the Input or Output storage classes
1410             * must be decorated with either a builtin or an explicit location.
1411             *
1412             * TODO: integrate the interface block support here. For now, don't complain --
1413             * a valid SPIRV module will only hit this path for the interface block case, as the
1414             * individual members of the type are decorated, rather than variable declarations.
1415             */
1416
1417            if (location != -1) {
1418                /* A user-defined interface variable, with a location. Where a variable
1419                 * occupies multiple locations, emit one result for each. */
1420                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1421                for (unsigned int offset = 0; offset < num_locations; offset++) {
1422                    interface_var v;
1423                    v.id = id;
1424                    v.type_id = type;
1425                    v.offset = offset;
1426                    v.is_patch = is_patch;
1427                    v.is_block_member = false;
1428                    out[std::make_pair(location + offset, component)] = v;
1429                }
1430            } else if (builtin == -1) {
1431                /* An interface block instance */
1432                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1433            }
1434        }
1435    }
1436}
1437
1438static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1439                                                 std::unordered_set<uint32_t> const &accessible_ids,
1440                                                 std::map<descriptor_slot_t, interface_var> &out) {
1441
1442    std::unordered_map<unsigned, unsigned> var_sets;
1443    std::unordered_map<unsigned, unsigned> var_bindings;
1444
1445    for (auto insn : *src) {
1446        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1447         * DecorationDescriptorSet and DecorationBinding.
1448         */
1449        if (insn.opcode() == spv::OpDecorate) {
1450            if (insn.word(2) == spv::DecorationDescriptorSet) {
1451                var_sets[insn.word(1)] = insn.word(3);
1452            }
1453
1454            if (insn.word(2) == spv::DecorationBinding) {
1455                var_bindings[insn.word(1)] = insn.word(3);
1456            }
1457        }
1458    }
1459
1460    for (auto id : accessible_ids) {
1461        auto insn = src->get_def(id);
1462        assert(insn != src->end());
1463
1464        if (insn.opcode() == spv::OpVariable &&
1465            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1466            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1467            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1468
1469            auto existing_it = out.find(std::make_pair(set, binding));
1470            if (existing_it != out.end()) {
1471                /* conflict within spv image */
1472                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1473                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1474                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1475                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1476                        existing_it->first.second);
1477            }
1478
1479            interface_var v;
1480            v.id = insn.word(2);
1481            v.type_id = insn.word(1);
1482            v.offset = 0;
1483            v.is_patch = false;
1484            v.is_block_member = false;
1485            out[std::make_pair(set, binding)] = v;
1486        }
1487    }
1488}
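
/* Note: a variable missing either decoration falls back to slot (0,0) here via
 * value_or_default; as the comment in the first loop says, valid SPIR-V for
 * Vulkan must decorate such variables with both DescriptorSet and Binding. */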
1489
1490static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1491                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1492                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1493                                              shader_stage_attributes const *consumer_stage) {
1494    std::map<location_t, interface_var> outputs;
1495    std::map<location_t, interface_var> inputs;
1496
1497    bool pass = true;
1498
1499    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1500    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1501
1502    auto a_it = outputs.begin();
1503    auto b_it = inputs.begin();
1504
1505    /* maps sorted by key (location); walk them together to find mismatches */
1506    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1507        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1508        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1509        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1510        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1511
1512        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1513            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1514                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1515                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1516                        a_first.second, consumer_stage->name)) {
1517                pass = false;
1518            }
1519            a_it++;
1520        } else if (a_at_end || a_first > b_first) {
1521            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1522                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1523                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1524                        producer_stage->name)) {
1525                pass = false;
1526            }
1527            b_it++;
1528        } else {
1529            // subtleties of arrayed interfaces:
1530            // - if is_patch, then the member is not arrayed, even though the interface may be.
1531            // - if is_block_member, then the extra array level of an arrayed interface is not
1532            //   expressed in the member type -- it's expressed in the block type.
1533            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1534                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1535                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1536                             true)) {
1537                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1538                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1539                            a_first.first, a_first.second,
1540                            describe_type(producer, a_it->second.type_id).c_str(),
1541                            describe_type(consumer, b_it->second.type_id).c_str())) {
1542                    pass = false;
1543                }
1544            }
1545            if (a_it->second.is_patch != b_it->second.is_patch) {
1546                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1547                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1548                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1549                            "per-%s in %s stage", a_first.first, a_first.second,
1550                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1551                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1552                    pass = false;
1553                }
1554            }
1555            a_it++;
1556            b_it++;
1557        }
1558    }
1559
1560    return pass;
1561}
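
/* Sketch of the merge walk above, with hypothetical locations: if the producer
 * writes {0, 1, 3} and the consumer reads {1, 2, 3}, location 0 yields the
 * "not consumed" performance warning, location 2 the "not written" error, and
 * locations 1 and 3 are type-checked pairwise. */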
1562
1563enum FORMAT_TYPE {
1564    FORMAT_TYPE_UNDEFINED,
1565    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1566    FORMAT_TYPE_SINT,
1567    FORMAT_TYPE_UINT,
1568};
1569
1570static unsigned get_format_type(VkFormat fmt) {
1571    switch (fmt) {
1572    case VK_FORMAT_UNDEFINED:
1573        return FORMAT_TYPE_UNDEFINED;
1574    case VK_FORMAT_R8_SINT:
1575    case VK_FORMAT_R8G8_SINT:
1576    case VK_FORMAT_R8G8B8_SINT:
1577    case VK_FORMAT_R8G8B8A8_SINT:
1578    case VK_FORMAT_R16_SINT:
1579    case VK_FORMAT_R16G16_SINT:
1580    case VK_FORMAT_R16G16B16_SINT:
1581    case VK_FORMAT_R16G16B16A16_SINT:
1582    case VK_FORMAT_R32_SINT:
1583    case VK_FORMAT_R32G32_SINT:
1584    case VK_FORMAT_R32G32B32_SINT:
1585    case VK_FORMAT_R32G32B32A32_SINT:
1586    case VK_FORMAT_R64_SINT:
1587    case VK_FORMAT_R64G64_SINT:
1588    case VK_FORMAT_R64G64B64_SINT:
1589    case VK_FORMAT_R64G64B64A64_SINT:
1590    case VK_FORMAT_B8G8R8_SINT:
1591    case VK_FORMAT_B8G8R8A8_SINT:
1592    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1593    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1594    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1595        return FORMAT_TYPE_SINT;
1596    case VK_FORMAT_R8_UINT:
1597    case VK_FORMAT_R8G8_UINT:
1598    case VK_FORMAT_R8G8B8_UINT:
1599    case VK_FORMAT_R8G8B8A8_UINT:
1600    case VK_FORMAT_R16_UINT:
1601    case VK_FORMAT_R16G16_UINT:
1602    case VK_FORMAT_R16G16B16_UINT:
1603    case VK_FORMAT_R16G16B16A16_UINT:
1604    case VK_FORMAT_R32_UINT:
1605    case VK_FORMAT_R32G32_UINT:
1606    case VK_FORMAT_R32G32B32_UINT:
1607    case VK_FORMAT_R32G32B32A32_UINT:
1608    case VK_FORMAT_R64_UINT:
1609    case VK_FORMAT_R64G64_UINT:
1610    case VK_FORMAT_R64G64B64_UINT:
1611    case VK_FORMAT_R64G64B64A64_UINT:
1612    case VK_FORMAT_B8G8R8_UINT:
1613    case VK_FORMAT_B8G8R8A8_UINT:
1614    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1615    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1616    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1617        return FORMAT_TYPE_UINT;
1618    default:
1619        return FORMAT_TYPE_FLOAT;
1620    }
1621}
1622
1623/* characterizes a SPIR-V type appearing in an interface to a fixed-function
1624 * (FF) stage, for comparison to a VkFormat's characterization above. */
1625static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1626    auto insn = src->get_def(type);
1627    assert(insn != src->end());
1628
1629    switch (insn.opcode()) {
1630    case spv::OpTypeInt:
1631        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1632    case spv::OpTypeFloat:
1633        return FORMAT_TYPE_FLOAT;
1634    case spv::OpTypeVector:
1635        return get_fundamental_type(src, insn.word(2));
1636    case spv::OpTypeMatrix:
1637        return get_fundamental_type(src, insn.word(2));
1638    case spv::OpTypeArray:
1639        return get_fundamental_type(src, insn.word(2));
1640    case spv::OpTypePointer:
1641        return get_fundamental_type(src, insn.word(3));
1642    default:
1643        return FORMAT_TYPE_UNDEFINED;
1644    }
1645}
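
/* For example, an ivec4 input resolves through OpTypeVector to an OpTypeInt
 * whose signedness word(3) is 1, giving FORMAT_TYPE_SINT -- which must then
 * agree with the characterization of the paired VkFormat. */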
1646
1647static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1648    uint32_t bit_pos = u_ffs(stage);
1649    return bit_pos - 1;
1650}
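
/* e.g. VK_SHADER_STAGE_VERTEX_BIT (0x1) maps to stage id 0 and
 * VK_SHADER_STAGE_FRAGMENT_BIT (0x10) to stage id 4. */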
1651
1652static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1653    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1654     * each binding should be specified only once.
1655     */
1656    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1657    bool pass = true;
1658
1659    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1660        auto desc = &vi->pVertexBindingDescriptions[i];
1661        auto &binding = bindings[desc->binding];
1662        if (binding) {
1663            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1664                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1665                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1666                pass = false;
1667            }
1668        } else {
1669            binding = desc;
1670        }
1671    }
1672
1673    return pass;
1674}
1675
1676static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1677                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1678    std::map<location_t, interface_var> inputs;
1679    bool pass = true;
1680
1681    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1682
1683    /* Build index by location */
1684    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1685    if (vi) {
1686        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1687            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1688            for (auto j = 0u; j < num_locations; j++) {
1689                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1690            }
1691        }
1692    }
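
    /* e.g. (hypothetical scenario) an attribute with format
     * VK_FORMAT_R64G64B64A64_SFLOAT at location 2 is indexed under both 2 and
     * 3, matching the two locations a dvec4 input consumes on the shader
     * side. */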
1693
1694    auto it_a = attribs.begin();
1695    auto it_b = inputs.begin();
1696
1697    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1698        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1699        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1700        auto a_first = a_at_end ? 0 : it_a->first;
1701        auto b_first = b_at_end ? 0 : it_b->first.first;
1702        if (!a_at_end && (b_at_end || a_first < b_first)) {
1703            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1704                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1705                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1706                pass = false;
1707            }
1708            it_a++;
1709        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1710            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1711                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1712                        b_first)) {
1713                pass = false;
1714            }
1715            it_b++;
1716        } else {
1717            unsigned attrib_type = get_format_type(it_a->second->format);
1718            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1719
1720            /* type checking */
1721            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1722                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1723                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1724                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1725                            string_VkFormat(it_a->second->format), a_first,
1726                            describe_type(vs, it_b->second.type_id).c_str())) {
1727                    pass = false;
1728                }
1729            }
1730
1731            /* OK! */
1732            it_a++;
1733            it_b++;
1734        }
1735    }
1736
1737    return pass;
1738}
1739
1740static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1741                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1742    std::map<location_t, interface_var> outputs;
1743    std::map<uint32_t, VkFormat> color_attachments;
1744    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1745        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1746            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1747        }
1748    }
1749
1750    bool pass = true;
1751
1752    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
1753
1754    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1755
1756    auto it_a = outputs.begin();
1757    auto it_b = color_attachments.begin();
1758
1759    /* Walk attachment list and outputs together */
1760
1761    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1762        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1763        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1764
1765        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1766            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1767                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1768                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1769                pass = false;
1770            }
1771            it_a++;
1772        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1773            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1774                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1775                pass = false;
1776            }
1777            it_b++;
1778        } else {
1779            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1780            unsigned att_type = get_format_type(it_b->second);
1781
1782            /* type checking */
1783            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1784                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1785                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1786                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1787                            string_VkFormat(it_b->second),
1788                            describe_type(fs, it_a->second.type_id).c_str())) {
1789                    pass = false;
1790                }
1791            }
1792
1793            /* OK! */
1794            it_a++;
1795            it_b++;
1796        }
1797    }
1798
1799    return pass;
1800}
1801
1802/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1803 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1804 * for example.
1805 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1806 *  - NOT the shader input/output interfaces.
1807 *
1808 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1809 * converting parts of this to be generated from the machine-readable spec instead.
1810 */
1811static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1812    std::unordered_set<uint32_t> worklist;
1813    worklist.insert(entrypoint.word(2));
1814
1815    while (!worklist.empty()) {
1816        auto id_iter = worklist.begin();
1817        auto id = *id_iter;
1818        worklist.erase(id_iter);
1819
1820        auto insn = src->get_def(id);
1821        if (insn == src->end()) {
1822            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1823             * across all kinds of things here that we may not care about. */
1824            continue;
1825        }
1826
1827        /* try to add to the output set */
1828        if (!ids.insert(id).second) {
1829            continue; /* if we already saw this id, we don't want to walk it again. */
1830        }
1831
1832        switch (insn.opcode()) {
1833        case spv::OpFunction:
1834            /* scan whole body of the function, enlisting anything interesting */
1835            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1836                switch (insn.opcode()) {
1837                case spv::OpLoad:
1838                case spv::OpAtomicLoad:
1839                case spv::OpAtomicExchange:
1840                case spv::OpAtomicCompareExchange:
1841                case spv::OpAtomicCompareExchangeWeak:
1842                case spv::OpAtomicIIncrement:
1843                case spv::OpAtomicIDecrement:
1844                case spv::OpAtomicIAdd:
1845                case spv::OpAtomicISub:
1846                case spv::OpAtomicSMin:
1847                case spv::OpAtomicUMin:
1848                case spv::OpAtomicSMax:
1849                case spv::OpAtomicUMax:
1850                case spv::OpAtomicAnd:
1851                case spv::OpAtomicOr:
1852                case spv::OpAtomicXor:
1853                    worklist.insert(insn.word(3)); /* ptr */
1854                    break;
1855                case spv::OpStore:
1856                case spv::OpAtomicStore:
1857                    worklist.insert(insn.word(1)); /* ptr */
1858                    break;
1859                case spv::OpAccessChain:
1860                case spv::OpInBoundsAccessChain:
1861                    worklist.insert(insn.word(3)); /* base ptr */
1862                    break;
1863                case spv::OpSampledImage:
1864                case spv::OpImageSampleImplicitLod:
1865                case spv::OpImageSampleExplicitLod:
1866                case spv::OpImageSampleDrefImplicitLod:
1867                case spv::OpImageSampleDrefExplicitLod:
1868                case spv::OpImageSampleProjImplicitLod:
1869                case spv::OpImageSampleProjExplicitLod:
1870                case spv::OpImageSampleProjDrefImplicitLod:
1871                case spv::OpImageSampleProjDrefExplicitLod:
1872                case spv::OpImageFetch:
1873                case spv::OpImageGather:
1874                case spv::OpImageDrefGather:
1875                case spv::OpImageRead:
1876                case spv::OpImage:
1877                case spv::OpImageQueryFormat:
1878                case spv::OpImageQueryOrder:
1879                case spv::OpImageQuerySizeLod:
1880                case spv::OpImageQuerySize:
1881                case spv::OpImageQueryLod:
1882                case spv::OpImageQueryLevels:
1883                case spv::OpImageQuerySamples:
1884                case spv::OpImageSparseSampleImplicitLod:
1885                case spv::OpImageSparseSampleExplicitLod:
1886                case spv::OpImageSparseSampleDrefImplicitLod:
1887                case spv::OpImageSparseSampleDrefExplicitLod:
1888                case spv::OpImageSparseSampleProjImplicitLod:
1889                case spv::OpImageSparseSampleProjExplicitLod:
1890                case spv::OpImageSparseSampleProjDrefImplicitLod:
1891                case spv::OpImageSparseSampleProjDrefExplicitLod:
1892                case spv::OpImageSparseFetch:
1893                case spv::OpImageSparseGather:
1894                case spv::OpImageSparseDrefGather:
1895                case spv::OpImageTexelPointer:
1896                    worklist.insert(insn.word(3)); /* image or sampled image */
1897                    break;
1898                case spv::OpImageWrite:
1899                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1900                    break;
1901                case spv::OpFunctionCall:
1902                    for (uint32_t i = 3; i < insn.len(); i++) {
1903                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1904                    }
1905                    break;
1906
1907                case spv::OpExtInst:
1908                    for (uint32_t i = 5; i < insn.len(); i++) {
1909                        worklist.insert(insn.word(i)); /* operands to ext inst */
1910                    }
1911                    break;
1912                }
1913            }
1914            break;
1915        }
1916    }
1917}
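
/* Sketch of the traversal above (hypothetical ids): starting from the
 * entrypoint's function id, an OpFunctionCall enqueues the callee and all of
 * its arguments, an OpLoad enqueues the pointer it reads, and an OpAccessChain
 * enqueues its base pointer -- so a uniform reached only through a helper
 * function still lands in the accessible set. */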
1918
1919static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1920                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1921                                                          shader_module const *src, spirv_inst_iter type,
1922                                                          VkShaderStageFlagBits stage) {
1923    bool pass = true;
1924
1925    /* strip off ptrs etc */
1926    type = get_struct_type(src, type, false);
1927    assert(type != src->end());
1928
1929    /* validate directly off the offsets. this isn't quite correct for arrays
1930     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1931     * sizes */
1932    for (auto insn : *src) {
1933        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1934
1935            if (insn.word(3) == spv::DecorationOffset) {
1936                unsigned offset = insn.word(4);
1937                auto size = 4; /* bytes; TODO: calculate this based on the type */
1938
1939                bool found_range = false;
1940                for (auto const &range : *pushConstantRanges) {
1941                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1942                        found_range = true;
1943
1944                        if ((range.stageFlags & stage) == 0) {
1945                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1946                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1947                                        "Push constant range covering variable starting at "
1948                                        "offset %u not accessible from stage %s",
1949                                        offset, string_VkShaderStageFlagBits(stage))) {
1950                                pass = false;
1951                            }
1952                        }
1953
1954                        break;
1955                    }
1956                }
1957
1958                if (!found_range) {
1959                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1960                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1961                                "Push constant range covering variable starting at "
1962                                "offset %u not declared in layout",
1963                                offset)) {
1964                        pass = false;
1965                    }
1966                }
1967            }
1968        }
1969    }
1970
1971    return pass;
1972}
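
/* Worked example (hypothetical layout): a member at Offset 16, with the
 * assumed 4-byte size above, is covered by a range
 * { VK_SHADER_STAGE_VERTEX_BIT, offset 0, size 32 }; validating the fragment
 * stage against that same range reports "not accessible from stage", and if
 * no range covers bytes 16..19 the "not declared in layout" error fires
 * instead. */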
1973
1974static bool validate_push_constant_usage(debug_report_data *report_data,
1975                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1976                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
1977    bool pass = true;
1978
1979    for (auto id : accessible_ids) {
1980        auto def_insn = src->get_def(id);
1981        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1982            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
1983                                                                 src->get_def(def_insn.word(1)), stage);
1984        }
1985    }
1986
1987    return pass;
1988}
1989
1990// For given pipelineLayout verify that the set_layout_node at slot.first
1991//  has the requested binding at slot.second and return ptr to that binding
1992static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
1993
1994    if (!pipelineLayout)
1995        return nullptr;
1996
1997    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
1998        return nullptr;
1999
2000    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2001}
2002
2003// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2004
2005static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2006
2007// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2008//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2009//   to that same cmd buffer by a separate thread do not change state out from under us
2010// Track the last cmd buffer touched by this thread
2011
2012static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2013    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2014        if (pCB->drawCount[i])
2015            return true;
2016    }
2017    return false;
2018}
2019
2020// Check object status for selected flag state
2021static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2022                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2023    if (!(pNode->status & status_mask)) {
2024        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2025                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2026                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2027    }
2028    return false;
2029}
2030
2031// Retrieve pipeline node ptr for given pipeline object
2032static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2033    auto it = my_data->pipelineMap.find(pipeline);
2034    if (it == my_data->pipelineMap.end()) {
2035        return nullptr;
2036    }
2037    return it->second;
2038}
2039
2040static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2041    auto it = my_data->renderPassMap.find(renderpass);
2042    if (it == my_data->renderPassMap.end()) {
2043        return nullptr;
2044    }
2045    return it->second;
2046}
2047
2048static FRAMEBUFFER_NODE *getFramebuffer(layer_data *my_data, VkFramebuffer framebuffer) {
2049    auto it = my_data->frameBufferMap.find(framebuffer);
2050    if (it == my_data->frameBufferMap.end()) {
2051        return nullptr;
2052    }
2053    return &it->second;
2054}
2055
2056cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2057    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2058    if (it == my_data->descriptorSetLayoutMap.end()) {
2059        return nullptr;
2060    }
2061    return it->second;
2062}
2063
2064static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2065    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2066    if (it == my_data->pipelineLayoutMap.end()) {
2067        return nullptr;
2068    }
2069    return &it->second;
2070}
2071
2072// Return true if for a given PSO, the given state enum is dynamic, else return false
2073static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2074    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2075        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2076            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2077                return true;
2078        }
2079    }
2080    return false;
2081}
2082
2083// Validate state stored as flags at time of draw call
2084static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2085    bool result;
2086    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2087                             "Dynamic viewport state not set for this command buffer");
2088    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2089                              "Dynamic scissor state not set for this command buffer");
2090    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2091        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2092         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2093        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2094                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2095    }
2096    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2097        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2098        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2099                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2100    }
2101    if (pPipe->blendConstantsEnabled) {
2102        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2103                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2104    }
2105    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2106        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2107        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2108                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2109    }
2110    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2111        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2112        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2113                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2114        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2115                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2116        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2117                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2118    }
2119    if (indexedDraw) {
2120        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2121                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2122                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2123    }
2124    return result;
2125}
2126
2127// Verify attachment reference compatibility according to spec
2128//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2129//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2130//   to make sure that format and sample counts match.
2131//  If not, they are not compatible.
2132static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2133                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2134                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2135                                             const VkAttachmentDescription *pSecondaryAttachments) {
2136    // Check potential NULL cases first to avoid nullptr issues later
2137    if (pPrimary == nullptr) {
2138        if (pSecondary == nullptr) {
2139            return true;
2140        }
2141        return false;
2142    } else if (pSecondary == nullptr) {
2143        return false;
2144    }
2145    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2146        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2147            return true;
2148    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2149        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2150            return true;
2151    } else { // format and sample count must match
2152        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2153             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2154            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2155             pSecondaryAttachments[pSecondary[index].attachment].samples))
2156            return true;
2157    }
2158    // Format and sample counts didn't match
2159    return false;
2160}
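
/* e.g. a primary subpass with two color references is compatible with a
 * secondary that declares three only if the secondary's extra reference is
 * VK_ATTACHMENT_UNUSED. */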
2161
2162// For given primary and secondary RenderPass objects, verify that they're compatible
2163static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2164                                            string &errorMsg) {
2165    auto primary_render_pass = getRenderPass(my_data, primaryRP);
2166    auto secondary_render_pass = getRenderPass(my_data, secondaryRP);
2167
2168    if (!primary_render_pass) {
2169        stringstream errorStr;
2170        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2171        errorMsg = errorStr.str();
2172        return false;
2173    }
2174
2175    if (!secondary_render_pass) {
2176        stringstream errorStr;
2177        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2178        errorMsg = errorStr.str();
2179        return false;
2180    }
2181    // Trivial pass case is exact same RP
2182    if (primaryRP == secondaryRP) {
2183        return true;
2184    }
2185    const VkRenderPassCreateInfo *primaryRPCI = primary_render_pass->pCreateInfo;
2186    const VkRenderPassCreateInfo *secondaryRPCI = secondary_render_pass->pCreateInfo;
2187    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2188        stringstream errorStr;
2189        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2190                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2191        errorMsg = errorStr.str();
2192        return false;
2193    }
2195    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2196        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2197        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2198        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2199        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2200        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2201            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2202                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2203                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2204                stringstream errorStr;
2205                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2206                errorMsg = errorStr.str();
2207                return false;
2208            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2209                                                         primaryColorCount, primaryRPCI->pAttachments,
2210                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2211                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2212                stringstream errorStr;
2213                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2214                errorMsg = errorStr.str();
2215                return false;
2216            }
2217        }
2218
2219        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2220                                              1, primaryRPCI->pAttachments,
2221                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2222                                              1, secondaryRPCI->pAttachments)) {
2223            stringstream errorStr;
2224            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2225            errorMsg = errorStr.str();
2226            return false;
2227        }
2228
2229        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2230        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2231        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2232        for (uint32_t i = 0; i < inputMax; ++i) {
2233            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2234                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2235                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2236                stringstream errorStr;
2237                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2238                errorMsg = errorStr.str();
2239                return false;
2240            }
2241        }
2242    }
2243    return true;
2244}
2245
2246// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2247// pipelineLayout[layoutIndex]
2248static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2249                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2250    auto pipeline_layout = getPipelineLayout(my_data, layout);
2251    if (!pipeline_layout) {
2252        stringstream errorStr;
2253        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2254        errorMsg = errorStr.str();
2255        return false;
2256    }
2257    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
2258        stringstream errorStr;
2259        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
2260                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
2261                 << ", but you're attempting to bind set to index " << layoutIndex;
2262        errorMsg = errorStr.str();
2263        return false;
2264    }
2265    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
2266    return pSet->IsCompatible(layout_node, &errorMsg);
2267}
2268
2269// Validate that data for each specialization entry is fully contained within the buffer.
2270static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2271    bool pass = true;
2272
2273    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2274
2275    if (spec) {
2276        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2277            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2278                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2279                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2280                            "Specialization entry %u (for constant id %u) references memory outside provided "
2281                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2282                            " bytes provided)",
2283                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2284                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2285
2286                    pass = false;
2287                }
2288            }
2289        }
2290    }
2291
2292    return pass;
2293}
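
/* e.g. with dataSize = 8, an entry { constantID 0, offset 6, size 4 } touches
 * bytes 6..9 and is reported, while { offset 4, size 4 } ends exactly at the
 * buffer boundary and passes. */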
2294
2295static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2296                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2297    auto type = module->get_def(type_id);
2298
2299    descriptor_count = 1;
2300
2301    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2302     * descriptor count for each dimension. */
2303    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2304        if (type.opcode() == spv::OpTypeArray) {
2305            descriptor_count *= get_constant_value(module, type.word(3));
2306            type = module->get_def(type.word(2));
2307        }
2308        else {
2309            type = module->get_def(type.word(3));
2310        }
2311    }
2312
2313    switch (type.opcode()) {
2314    case spv::OpTypeStruct: {
2315        for (auto insn : *module) {
2316            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2317                if (insn.word(2) == spv::DecorationBlock) {
2318                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2319                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2320                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2321                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2322                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2323                }
2324            }
2325        }
2326
2327        /* Invalid */
2328        return false;
2329    }
2330
2331    case spv::OpTypeSampler:
2332        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2333
2334    case spv::OpTypeSampledImage:
2335        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2336            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2337             * doesn't really have a sampler, and a texel buffer descriptor
2338             * doesn't really provide one. Allow this slight mismatch.
2339             */
2340            auto image_type = module->get_def(type.word(2));
2341            auto dim = image_type.word(3);
2342            auto sampled = image_type.word(7);
2343            return dim == spv::DimBuffer && sampled == 1;
2344        }
2345        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2346
2347    case spv::OpTypeImage: {
2348        /* Many descriptor types can back an image type -- which one depends on
2349         * the dimension and on whether the image will be used with a sampler.
2350         * SPIR-V for Vulkan requires that Sampled be 1 or 2 -- leaving the
2351         * decision to runtime is unacceptable.
2352         */
2353        auto dim = type.word(3);
2354        auto sampled = type.word(7);
2355
2356        if (dim == spv::DimSubpassData) {
2357            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2358        } else if (dim == spv::DimBuffer) {
2359            if (sampled == 1) {
2360                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2361            } else {
2362                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2363            }
2364        } else if (sampled == 1) {
2365            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2366        } else {
2367            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2368        }
2369    }
2370
2371    /* We shouldn't really see any other junk types -- but if we do, they're
2372     * a mismatch.
2373     */
2374    default:
2375        return false; /* Mismatch */
2376    }
2377}
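
/* e.g. a GLSL "uniform sampler2D tex[4];" shows up as OpTypePointer ->
 * OpTypeArray(4) -> OpTypeSampledImage: the array level sets descriptor_count
 * to 4, and the binding must then be VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER. */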
2378
2379static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2380    if (!feature) {
2381        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2382                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2383                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2384                    "enabled on the device",
2385                    feature_name)) {
2386            return false;
2387        }
2388    }
2389
2390    return true;
2391}
2392
2393static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2394                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2395    bool pass = true;
2396
2398    for (auto insn : *src) {
2399        if (insn.opcode() == spv::OpCapability) {
2400            switch (insn.word(1)) {
2401            case spv::CapabilityMatrix:
2402            case spv::CapabilityShader:
2403            case spv::CapabilityInputAttachment:
2404            case spv::CapabilitySampled1D:
2405            case spv::CapabilityImage1D:
2406            case spv::CapabilitySampledBuffer:
2407            case spv::CapabilityImageBuffer:
2408            case spv::CapabilityImageQuery:
2409            case spv::CapabilityDerivativeControl:
2410                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2411                break;
2412
2413            case spv::CapabilityGeometry:
2414                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2415                break;
2416
2417            case spv::CapabilityTessellation:
2418                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2419                break;
2420
2421            case spv::CapabilityFloat64:
2422                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2423                break;
2424
2425            case spv::CapabilityInt64:
2426                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2427                break;
2428
2429            case spv::CapabilityTessellationPointSize:
2430            case spv::CapabilityGeometryPointSize:
2431                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2432                                        "shaderTessellationAndGeometryPointSize");
2433                break;
2434
2435            case spv::CapabilityImageGatherExtended:
2436                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2437                break;
2438
2439            case spv::CapabilityStorageImageMultisample:
2440                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2441                break;
2442
2443            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2444                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2445                                        "shaderUniformBufferArrayDynamicIndexing");
2446                break;
2447
2448            case spv::CapabilitySampledImageArrayDynamicIndexing:
2449                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2450                                        "shaderSampledImageArrayDynamicIndexing");
2451                break;
2452
2453            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2454                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2455                                        "shaderStorageBufferArrayDynamicIndexing");
2456                break;
2457
2458            case spv::CapabilityStorageImageArrayDynamicIndexing:
2459                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2460                                        "shaderStorageImageArrayDynamicIndexing");
2461                break;
2462
2463            case spv::CapabilityClipDistance:
2464                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2465                break;
2466
2467            case spv::CapabilityCullDistance:
2468                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2469                break;
2470
2471            case spv::CapabilityImageCubeArray:
2472                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2473                break;
2474
2475            case spv::CapabilitySampleRateShading:
2476                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2477                break;
2478
2479            case spv::CapabilitySparseResidency:
2480                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2481                break;
2482
2483            case spv::CapabilityMinLod:
2484                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2485                break;
2486
2487            case spv::CapabilitySampledCubeArray:
2488                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2489                break;
2490
2491            case spv::CapabilityImageMSArray:
2492                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2493                break;
2494
2495            case spv::CapabilityStorageImageExtendedFormats:
2496                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2497                                        "shaderStorageImageExtendedFormats");
2498                break;
2499
2500            case spv::CapabilityInterpolationFunction:
2501                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2502                break;
2503
2504            case spv::CapabilityStorageImageReadWithoutFormat:
2505                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2506                                        "shaderStorageImageReadWithoutFormat");
2507                break;
2508
2509            case spv::CapabilityStorageImageWriteWithoutFormat:
2510                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2511                                        "shaderStorageImageWriteWithoutFormat");
2512                break;
2513
2514            case spv::CapabilityMultiViewport:
2515                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2516                break;
2517
2518            default:
2519                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2520                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2521                            "Shader declares capability %u, not supported in Vulkan.",
2522                            insn.word(1)))
2523                    pass = false;
2524                break;
2525            }
2526        }
2527    }
2528
2529    return pass;
2530}
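
// For reference, the OpCapability instructions walked above sit at the very
// top of a SPIR-V module; in spirv-dis form a module that triggers two of the
// feature checks might begin (illustrative disassembly):
//
//     OpCapability Shader        ; core capability -- always accepted
//     OpCapability Geometry      ; checked against enabledFeatures->geometryShader
//     OpCapability Float64       ; checked against enabledFeatures->shaderFloat64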
2531
2532static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2533                                           VkPipelineShaderStageCreateInfo const *pStage,
2534                                           PIPELINE_NODE *pipeline,
2535                                           shader_module **out_module,
2536                                           spirv_inst_iter *out_entrypoint,
2537                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2538                                           std::unordered_map<VkShaderModule,
2539                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2540    bool pass = true;
2541    auto module_it = shaderModuleMap.find(pStage->module);
    if (module_it == shaderModuleMap.end()) return false; /* unknown shader module -- nothing to validate */
2542    auto module = *out_module = module_it->second.get();
2543    pass &= validate_specialization_offsets(report_data, pStage);
2544
2545    /* find the entrypoint */
2546    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2547    if (entrypoint == module->end()) {
2548        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2549                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2550                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2551                    string_VkShaderStageFlagBits(pStage->stage))) {
2552            pass = false;
2553        }
        return pass; /* the checks below require a valid entrypoint iterator, so bail out here */
2554    }
2555
2556    /* validate shader capabilities against enabled device features */
2557    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2558
2559    /* mark accessible ids */
2560    std::unordered_set<uint32_t> accessible_ids;
2561    mark_accessible_ids(module, entrypoint, accessible_ids);
2562
2563    /* validate descriptor set layout against what the entrypoint actually uses */
2564    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2565    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2566
2567    auto pipelineLayout = pipeline->pipelineLayout;
2568
2569    /* validate push constant usage */
2570    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2571                                        module, accessible_ids, pStage->stage);
2572
2573    /* validate descriptor use */
2574    for (auto use : descriptor_uses) {
2575        // While validating shaders, capture which slots are used by the pipeline
2576        pipeline->active_slots[use.first.first].insert(use.first.second);
2577
2578        /* verify given pipelineLayout has requested setLayout with requested binding */
2579        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2580        unsigned required_descriptor_count;
2581
2582        if (!binding) {
2583            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2584                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2585                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2586                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2587                pass = false;
2588            }
2589        } else if (~binding->stageFlags & pStage->stage) {
2590            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2591                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2592                        "Shader uses descriptor slot %u.%u (used "
2593                        "as type `%s`) but descriptor not "
2594                        "accessible from stage %s",
2595                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2596                        string_VkShaderStageFlagBits(pStage->stage))) {
2597                pass = false;
2598            }
2599        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2600                                          /*out*/ required_descriptor_count)) {
2601            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2602                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2603                                                                       "%u.%u (used as type `%s`) but the "
2604                                                                       "bound descriptor is of type %s",
2605                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2606                        string_VkDescriptorType(binding->descriptorType))) {
2607                pass = false;
2608            }
2609        } else if (binding->descriptorCount < required_descriptor_count) {
2610            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2611                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2612                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2613                        required_descriptor_count, use.first.first, use.first.second,
2614                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2615                pass = false;
2616            }
2617        }
2618    }
2619
2620    return pass;
2621}
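
// Example of a mismatch the descriptor-use loop above reports (illustrative):
// a fragment shader declaring
//     layout(set = 0, binding = 0) uniform sampler2D tex;
// paired with a pipeline layout whose set 0, binding 0 was created as
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER fails descriptor_type_match(), because the
// SPIR-V type is an OpTypeSampledImage, which (for a non-buffer Dim) only
// matches VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER.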
2622
2623
2624// Validate the shaders used by the given pipeline, and store the descriptor slots
2625//  the pipeline actually uses into pPipeline->active_slots
2626static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2627                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2628                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2629    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2630    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2631    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2632
2633    shader_module *shaders[5];
2634    memset(shaders, 0, sizeof(shaders));
2635    spirv_inst_iter entrypoints[5];
2636    memset(entrypoints, 0, sizeof(entrypoints));
2637    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2638    bool pass = true;
2639
2640    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2641        auto pStage = &pCreateInfo->pStages[i];
2642        auto stage_id = get_shader_stage_id(pStage->stage);
2643        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2644                                               &shaders[stage_id], &entrypoints[stage_id],
2645                                               enabledFeatures, shaderModuleMap);
2646    }
2647
2648    vi = pCreateInfo->pVertexInputState;
2649
2650    if (vi) {
2651        pass &= validate_vi_consistency(report_data, vi);
2652    }
2653
2654    if (shaders[vertex_stage]) {
2655        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2656    }
2657
2658    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2659    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2660
2661    while (!shaders[producer] && producer != fragment_stage) {
2662        producer++;
2663        consumer++;
2664    }
2665
2666    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2667        assert(shaders[producer]);
2668        if (shaders[consumer]) {
2669            pass &= validate_interface_between_stages(report_data,
2670                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2671                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2672
2673            producer = consumer;
2674        }
2675    }
2676
2677    if (shaders[fragment_stage] && pPipeline->renderPass) {
2678        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2679                                                        pPipeline->renderPass, pCreateInfo->subpass);
2680    }
2681
2682    return pass;
2683}
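
// Walkthrough of the producer/consumer scan above (illustrative, assuming the
// usual VS/TCS/TES/GS/FS stage ordering): with only a vertex and a fragment
// shader bound, shaders[] holds {VS, 0, 0, 0, FS}. The while loop leaves
// producer at the vertex stage, the for loop skips the three empty slots, and
// exactly one interface is validated: VS outputs against FS inputs.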
2684
2685static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2686                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2687    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2688
2689    shader_module *module;
2690    spirv_inst_iter entrypoint;
2691
2692    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2693                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2694}
2695// Return Set node ptr for specified set or else NULL
2696cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2697    auto set_it = my_data->setMap.find(set);
2698    if (set_it == my_data->setMap.end()) {
2699        return NULL;
2700    }
2701    return set_it->second;
2702}
2703// For the given command buffer, verify and update the state for activeSetBindingsPairs
2704//  This includes:
2705//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2706//     To be valid, the dynamic offset combined with the offset and range from its
2707//     descriptor update must not overflow the size of its buffer being updated
2708//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2709//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2710static bool validate_and_update_drawtime_descriptor_state(
2711    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2712    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2713                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
2714    bool result = false;
2715    for (auto set_bindings_pair : activeSetBindingsPairs) {
2716        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2717        std::string err_str;
2718        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2719                                         &err_str)) {
2720            // Report error here
2721            auto set = set_node->GetSet();
2722            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2723                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2724                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2725                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2726        }
2727        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2728    }
2729    return result;
2730}
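
// Dynamic-offset arithmetic enforced by ValidateDrawState above (illustrative
// numbers): for a 256-byte buffer whose descriptor was written with offset 0
// and range 128, a dynamic offset of 64 is fine (64 + 0 + 128 <= 256), while a
// dynamic offset of 192 overflows the buffer (192 + 0 + 128 > 256) and is
// reported through the log_msg() call above.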
2731
2732// For given pipeline, return number of MSAA samples, or one if MSAA disabled
2733static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2734    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2735        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2736        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2737    }
2738    return VK_SAMPLE_COUNT_1_BIT;
2739}
2740
2741// Validate draw-time state related to the PSO
2742static bool validatePipelineDrawtimeState(layer_data const *my_data,
2743                                          LAST_BOUND_STATE const &state,
2744                                          const GLOBAL_CB_NODE *pCB,
2745                                          PIPELINE_NODE const *pPipeline) {
2746    bool skip_call = false;
2747
2748    // Verify Vtx binding
2749    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2750        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2751            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2752                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2753                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2754                                  "The Pipeline State Object (0x%" PRIxLEAST64
2755                                  ") expects this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
2756                                  " to have been set via vkCmdBindVertexBuffers().",
2757                                  (uint64_t)state.pipeline, i);
2758            }
2759        }
2760    } else {
2761        if (!pCB->currentDrawData.buffers.empty()) {
2762            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2763                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2764                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2765                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2766                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2767        }
2768    }
2769    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2770    // Skip check if rasterization is disabled or there is no viewport.
2771    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2772         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2773        pPipeline->graphicsPipelineCI.pViewportState) {
2774        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2775        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2776        if (dynViewport) {
2777            if (pCB->viewports.size() != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
2778                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2779                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2780                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2781                                  ", but PSO viewportCount is %u. These counts must match.",
2782                                  pCB->viewports.size(), pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
2783            }
2784        }
2785        if (dynScissor) {
2786            if (pCB->scissors.size() != pPipeline->graphicsPipelineCI.pViewportState->scissorCount) {
2787                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2788                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2789                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2790                                  ", but PSO scissorCount is %u. These counts must match.",
2791                                  pCB->scissors.size(), pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
2792            }
2793        }
2794    }
2795
2796    // Verify that any MSAA request in PSO matches sample# in bound FB
2797    // Skip the check if rasterization is disabled.
2798    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2799        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2800        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2801        if (pCB->activeRenderPass) {
2802            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2803            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2804            VkSampleCountFlagBits subpass_num_samples = VkSampleCountFlagBits(0);
2805            uint32_t i;
2806
2807            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2808            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2809                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2810                skip_call |=
2811                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2812                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2813                                "In subpass %u, the pipeline's blend state attachment "
2814                                "count %u does not match the subpass color attachment count %u for Pipeline (0x%" PRIxLEAST64 ")!  These "
2815                                "counts must be the same at draw-time.",
2816                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2817                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2818            }
2819
2820            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2821                VkSampleCountFlagBits samples;
2822
2823                if (subpass_desc->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
2824                    continue;
2825
2826                samples = render_pass_info->pAttachments[subpass_desc->pColorAttachments[i].attachment].samples;
2827                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0)) {
2828                    subpass_num_samples = samples;
2829                } else if (subpass_num_samples != samples) {
2830                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2831                    break;
2832                }
2833            }
2834            if ((subpass_desc->pDepthStencilAttachment != NULL) &&
2835                (subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
2836                const VkSampleCountFlagBits samples =
2837                        render_pass_info->pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples;
2838                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0))
2839                    subpass_num_samples = samples;
2840                else if (subpass_num_samples != samples)
2841                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2842            }
2843
2844            if (((subpass_desc->colorAttachmentCount > 0) || (subpass_desc->pDepthStencilAttachment != NULL)) &&
2845                (pso_num_samples != subpass_num_samples)) {
2846                skip_call |=
2847                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2848                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2849                                "Sample count mismatch at draw-time! Pipeline (0x%" PRIxLEAST64
2850                                ") uses %u samples, but the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
2851                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2852                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2853            }
2854        } else {
2855            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2856                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2857                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2858                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2859        }
2860    }
2861    // Verify that PSO creation renderPass is compatible with active renderPass
2862    if (pCB->activeRenderPass) {
2863        std::string err_string;
2864        if (!verify_renderpass_compatibility(my_data, pCB->activeRenderPass->renderPass, pPipeline->graphicsPipelineCI.renderPass,
2865                                             err_string)) {
2866            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2867            skip_call |=
2868                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2869                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2870                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
2871                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2872                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
2873                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2874        }
2875    }
2876    // TODO : Add more checks here
2877
2878    return skip_call;
2879}
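
// Concrete instance of the sample-count rule above (illustrative): a pipeline
// created with rasterizationSamples = VK_SAMPLE_COUNT_4_BIT that is used in a
// subpass whose only color attachment was created with VK_SAMPLE_COUNT_1_BIT
// yields DRAWSTATE_NUM_SAMPLES_MISMATCH, since getNumSamples() reports 4 while
// the subpass attachments agree on 1.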
2880
2881// Validate overall state at the time of a draw call
2882static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2883                                           const VkPipelineBindPoint bindPoint) {
2884    bool result = false;
2885    auto const &state = pCB->lastBound[bindPoint];
2886    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2887    if (nullptr == pPipe) {
2888        result |= log_msg(
2889            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2890            DRAWSTATE_INVALID_PIPELINE, "DS",
2891            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2892        // Early return unconditionally: every check below dereferences pPipe, so continuing without a pipeline would crash
2893        return result;
2895    }
2896    // First check flag states
2897    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2898        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2899
2900    // Now complete other state checks
2901    if (state.pipelineLayout) {
2902        string errorString;
2903        auto pipelineLayout = (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) ? pPipe->graphicsPipelineCI.layout : pPipe->computePipelineCI.layout;
2904
2905        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2906        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
2907        for (auto & setBindingPair : pPipe->active_slots) {
2908            uint32_t setIndex = setBindingPair.first;
2909            // If valid set is not bound throw an error
2910            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2911                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2912                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2913                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2914                                  setIndex);
2915            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2916                                                        pipelineLayout, setIndex, errorString)) {
2917                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2918                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2919                result |=
2920                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2921                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2922                            "VkDescriptorSet (0x%" PRIxLEAST64
2923                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2924                            (uint64_t)setHandle, setIndex, (uint64_t)pipelineLayout, errorString.c_str());
2925            } else { // Valid set is bound and layout compatible, validate that it's updated
2926                // Pull the set node
2927                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2928                // Save vector of all active sets to verify dynamicOffsets below
2929                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2930                                                                 &state.dynamicOffsets[setIndex]));
2931                // Make sure set has been updated if it has no immutable samplers
2932                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2933                if (!pSet->IsUpdated()) {
2934                    for (auto binding : setBindingPair.second) {
2935                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2936                            result |= log_msg(
2937                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2938                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2939                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2940                                "this will result in undefined behavior.",
2941                                (uint64_t)pSet->GetSet());
2942                        }
2943                    }
2944                }
2945            }
2946        }
2947        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2948        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2949    }
2950
2951    // Check general pipeline state that needs to be validated at drawtime
2952    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2953        result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe);
2954
2955    return result;
2956}
2957
2958// Validate HW line width capabilities prior to setting requested line width.
2959static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2960    bool skip_call = false;
2961
2962    // First check to see if the physical device supports wide lines.
2963    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2964        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2965                             dsError, "DS", "Attempt to set lineWidth to %f but the physical device's wideLines "
2966                                            "feature is not enabled, so lineWidth must be 1.0f!",
2967                             lineWidth);
2968    } else {
2969        // Otherwise, make sure the width falls in the valid range.
2970        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
2971            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
2972            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
2973                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but the physical device limits line width "
2974                                                          "to the range [%f, %f]!",
2975                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
2976                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
2977        }
2978    }
2979
2980    return skip_call;
2981}
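
// Example bounds (illustrative): with the wideLines feature disabled, only
// lineWidth == 1.0f passes. With wideLines enabled on a device reporting
// lineWidthRange = [0.5, 10.0], a request of 12.0f fails the range check while
// 8.0f is accepted.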
2982
2983// Verify that create state for a pipeline is valid
2984static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> const &pPipelines,
2985                                      int pipelineIndex) {
2986    bool skipCall = false;
2987
2988    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
2989
2990    // If create derivative bit is set, check that we've specified a base
2991    // pipeline correctly, and that the base pipeline was created to allow
2992    // derivatives.
2993    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
2994        PIPELINE_NODE *pBasePipeline = nullptr;
2995        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
2996              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
2997            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2998                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2999                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3000        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3001            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3002                skipCall |=
3003                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3004                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3005                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3006            } else {
3007                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3008            }
3009        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3010            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3011        }
3012
3013        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3014            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3015                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3016                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3017        }
3018    }
3019
3020    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3021        if (!my_data->phys_dev_properties.features.independentBlend) {
3022            if (pPipeline->attachments.size() > 1) {
3023                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3024                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3025                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3026                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3027                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3028                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3029                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3030                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3031                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3032                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3033                        skipCall |=
3034                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3035                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3036                            "enabled, all elements of pAttachments must be identical");
3037                    }
3038                }
3039            }
3040        }
3041        if (!my_data->phys_dev_properties.features.logicOp &&
3042            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3043            skipCall |=
3044                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3045                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3046                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3047        }
3048        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3049            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3050             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3051            skipCall |=
3052                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3053                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3054                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3055        }
3056    }
3057
3058    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3059    // produces nonsense errors that confuse users. Other layers should already
3060    // emit errors for renderpass being invalid.
3061    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3062    if (renderPass &&
3063        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3064        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3065                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3066                                                                           "is out of range for this renderpass (0..%u)",
3067                            pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3068    }
3069
3070    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3071                                                    my_data->shaderModuleMap)) {
3072        skipCall = true;
3073    }
3074    // Each shader's stage must be unique
3075    if (pPipeline->duplicate_shaders) {
3076        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3077            if (pPipeline->duplicate_shaders & stage) {
3078                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3079                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3080                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3081                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3082            }
3083        }
3084    }
3085    // VS is required
3086    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3087        skipCall |=
3088            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3089                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3090    }
3091    // Either both or neither TC/TE shaders should be defined
3092    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3093        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3094        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3095                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3096                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3097    }
3098    // Compute shaders should be specified independent of Gfx shaders
3099    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3100        (pPipeline->active_shaders &
3101         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3102          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3103        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3104                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3105                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3106    }
3107    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3108    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3109    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3110        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3111         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3112        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3113                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3114                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3115                                                                           "topology for tessellation pipelines");
3116    }
3117    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3118        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3119        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3120            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3121                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3122                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3123                                                                               "topology is only valid for tessellation pipelines");
3124        }
3125        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3126            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3127                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3128                                "Invalid Pipeline CreateInfo State: "
3129                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3130                                "topology used. pTessellationState must not be NULL in this case.");
3131        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3132                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3133            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3134                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3135                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3136                                                                               "topology used with patchControlPoints value %u."
3137                                                                               " patchControlPoints should be >0 and <=32.",
3138                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3139        }
3140    }
3141    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3142    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3143        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3144            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3145                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3146        }
3147    }
3148    // Viewport state must be included if rasterization is enabled.
3149    // If the viewport state is included, the viewport and scissor counts should always match.
3150    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3151    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3152        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3153        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3154            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3155                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3156                                                                           "and scissors are dynamic, the PSO must include "
3157                                                                           "viewportCount and scissorCount in pViewportState.");
3158        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3159                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3160            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3161                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3162                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3163                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3164                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3165        } else {
3166            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3167            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3168            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3169            if (!dynViewport) {
3170                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3171                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3172                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3173                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3174                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3175                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3176                                        "vkCmdSetViewport().",
3177                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3178                }
3179            }
3180            if (!dynScissor) {
3181                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3182                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3183                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3184                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3185                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3186                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3187                                        "vkCmdSetScissor().",
3188                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3189                }
3190            }
3191        }
3192    }
3193    return skipCall;
3194}
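
// App-side sketch of the derivative-pipeline rules enforced at the top of
// verifyPipelineCreateState (hypothetical application code; base_ci/derived_ci
// are illustrative VkGraphicsPipelineCreateInfo values):
//
//     VkGraphicsPipelineCreateInfo infos[2] = {base_ci, derived_ci};
//     infos[0].flags |= VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT; // base permits derivation
//     infos[1].flags |= VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     infos[1].basePipelineIndex = 0;                // earlier in the array: OK
//     infos[1].basePipelineHandle = VK_NULL_HANDLE;  // exactly one of index/handle is set
//
// Setting both the index and the handle, pointing the index at a later array
// element, or deriving from a base created without ALLOW_DERIVATIVES each
// produce DRAWSTATE_INVALID_PIPELINE_CREATE_STATE above.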
3195
3196// Free the Pipeline nodes
3197static void deletePipelines(layer_data *my_data) {
3198    if (my_data->pipelineMap.empty())
3199        return;
3200    for (auto &pipe_map_pair : my_data->pipelineMap) {
3201        delete pipe_map_pair.second;
3202    }
3203    my_data->pipelineMap.clear();
3204}
3205
3206// Block of code at start here specifically for managing/tracking DSs
3207
3208// Return Pool node ptr for specified pool or else NULL
3209DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3210    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3211    if (pool_it == dev_data->descriptorPoolMap.end()) {
3212        return NULL;
3213    }
3214    return pool_it->second;
3215}
3216
3217// Return false if the update struct is of a valid type; otherwise flag an error and return the callback's result
3218static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3219    switch (pUpdateStruct->sType) {
3220    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3221    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3222        return false;
3223    default:
3224        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3225                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3226                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3227                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3228    }
3229}
3230
3231// Return the descriptor count for the given update struct
3232static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3233    switch (pUpdateStruct->sType) {
3234    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3235        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3236    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3237        // TODO : Need to understand this case better and make sure code is correct
3238        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3239    default:
3240        return 0;
3241    }
3242}
3243
3244// For given layout and update, return the first overall index of the layout that is updated
3245static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3246                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3247    return binding_start_index + arrayIndex;
3248}
3249// For given layout and update, return the last overall index of the layout that is updated
3250static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3251                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3252    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3253    return binding_start_index + arrayIndex + count - 1;
3254}
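
// Index arithmetic example (illustrative): for a binding whose descriptors
// occupy overall layout indices starting at binding_start_index = 4, an update
// with arrayIndex = 1 and descriptorCount = 3 touches indices 5 through 7:
// start = 4 + 1 = 5, end = 4 + 1 + 3 - 1 = 7.
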
3255// Verify that the descriptor type in the update struct matches what's expected by the layout
3256static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3257                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3258    // First get actual type of update
3259    bool skipCall = false;
3260    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3261    switch (pUpdateStruct->sType) {
3262    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3263        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3264        break;
3265    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3266        /* no need to validate */
3267        return false;
3269    default:
3270        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3271                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3272                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3273                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3274    }
3275    if (!skipCall) {
3276        if (layout_type != actualType) {
3277            skipCall |= log_msg(
3278                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3279                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3280                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3281                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3282        }
3283    }
3284    return skipCall;
3285}
3286// TODO : Consolidate the overlapping FindLayout()/SetLayout() helpers below
3287bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3288    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3289    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3290        return false;
3291    }
3292    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3293    imgpair.subresource.aspectMask = aspectMask;
3294    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3295    if (imgsubIt == pCB->imageLayoutMap.end()) {
3296        return false;
3297    }
3298    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3299        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3300                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3301                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3302                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3303    }
3304    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3305        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3306                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3307                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3308                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3309    }
3310    node = imgsubIt->second;
3311    return true;
3312}
3313
3314bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3315    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3316        return false;
3317    }
3318    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3319    imgpair.subresource.aspectMask = aspectMask;
3320    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3321    if (imgsubIt == my_data->imageLayoutMap.end()) {
3322        return false;
3323    }
3324    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3325        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3326                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3327                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3328                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3329    }
3330    layout = imgsubIt->second.layout;
3331    return true;
3332}
3333
3334// find layout(s) on the cmd buf level
3335bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3336    ImageSubresourcePair imgpair = {image, true, range};
3337    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3338    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3339    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3340    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3341    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3342    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3343        imgpair = {image, false, VkImageSubresource()};
3344        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3345        if (imgsubIt == pCB->imageLayoutMap.end())
3346            return false;
3347        node = imgsubIt->second;
3348    }
3349    return true;
3350}
3351
3352// find layout(s) on the global level
3353bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3354    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3355    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3356    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3357    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3358    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3359    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3360        imgpair = {imgpair.image, false, VkImageSubresource()};
3361        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3362        if (imgsubIt == my_data->imageLayoutMap.end())
3363            return false;
3364        layout = imgsubIt->second.layout;
3365    }
3366    return true;
3367}
3368
3369bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3370    ImageSubresourcePair imgpair = {image, true, range};
3371    return FindLayout(my_data, imgpair, layout);
3372}
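
// Usage sketch (hypothetical caller, compiled out; dev_data and my_image are
// assumed to be in scope): querying the tracked layout of one mip/layer of a
// depth/stencil image. FindLayout() fans the query out across the aspect bits
// and logs an error if the combined aspects disagree on layout.
#if 0
VkImageSubresource subres = {VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, 0 /*mipLevel*/, 0 /*arrayLayer*/};
VkImageLayout tracked_layout = VK_IMAGE_LAYOUT_MAX_ENUM;
if (FindLayout(dev_data, my_image, subres, tracked_layout)) {
    // tracked_layout now holds the last layout recorded for this subresource,
    // or the whole-image layout if no per-subresource entry exists
}
#endif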
3373
3374bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3375    auto sub_data = my_data->imageSubresourceMap.find(image);
3376    if (sub_data == my_data->imageSubresourceMap.end())
3377        return false;
3378    auto img_node = getImageNode(my_data, image);
3379    if (!img_node)
3380        return false;
3381    bool ignoreGlobal = false;
3382    // TODO: Make this robust for >1 aspect mask. For now it will simply ignore
3383    // potential errors in this case.
3384    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3385        ignoreGlobal = true;
3386    }
3387    for (auto imgsubpair : sub_data->second) {
3388        if (ignoreGlobal && !imgsubpair.hasSubresource)
3389            continue;
3390        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3391        if (img_data != my_data->imageLayoutMap.end()) {
3392            layouts.push_back(img_data->second.layout);
3393        }
3394    }
3395    return true;
3396}
3397
3398// Set the layout on the global level
3399void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3400    VkImage &image = imgpair.image;
3401    // TODO (mlentine): Maybe set format if new? Not used atm.
3402    my_data->imageLayoutMap[imgpair].layout = layout;
3403    // TODO (mlentine): Maybe make vector a set?
3404    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3405    if (subresource == my_data->imageSubresourceMap[image].end()) {
3406        my_data->imageSubresourceMap[image].push_back(imgpair);
3407    }
3408}
3409
3410// Set the layout on the cmdbuf level
3411void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3412    pCB->imageLayoutMap[imgpair] = node;
3413    // TODO (mlentine): Maybe make vector a set?
3414    auto subresource =
3415        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3416    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3417        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3418    }
3419}
3420
3421void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3422    // TODO (mlentine): Maybe make vector a set?
3423    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3424        pCB->imageSubresourceMap[imgpair.image].end()) {
3425        pCB->imageLayoutMap[imgpair].layout = layout;
3426    } else {
3427        // TODO (mlentine): Could be expensive and might need to be removed.
3428        assert(imgpair.hasSubresource);
3429        IMAGE_CMD_BUF_LAYOUT_NODE node;
3430        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3431            node.initialLayout = layout;
3432        }
3433        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3434    }
3435}
3436
3437template <class OBJECT, class LAYOUT>
3438void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3439    if (imgpair.subresource.aspectMask & aspectMask) {
3440        imgpair.subresource.aspectMask = aspectMask;
3441        SetLayout(pObject, imgpair, layout);
3442    }
3443}
3444
3445template <class OBJECT, class LAYOUT>
3446void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3447    ImageSubresourcePair imgpair = {image, true, range};
3448    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3449    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3450    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3451    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3452}
3453
3454template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3455    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3456    SetLayout(pObject, imgpair, layout); // whole-image pair: no per-aspect fan-out, and no (object, image, pair, layout) overload exists
3457}
3458
3459void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3460    auto iv_data = getImageViewData(dev_data, imageView);
3461    assert(iv_data);
3462    const VkImage &image = iv_data->image;
3463    const VkImageSubresourceRange &subRange = iv_data->subresourceRange;
3464    // TODO: Do not iterate over every possibility - consolidate where possible
3465    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3466        uint32_t level = subRange.baseMipLevel + j;
3467        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3468            uint32_t layer = subRange.baseArrayLayer + k;
3469            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3470            SetLayout(pCB, image, sub, layout);
3471        }
3472    }
3473}
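
// Worked example (illustrative only): an image view with baseMipLevel == 1,
// levelCount == 2, baseArrayLayer == 0, layerCount == 2 expands into four
// per-subresource SetLayout() calls on the command buffer:
//   (mip 1, layer 0), (mip 1, layer 1), (mip 2, layer 0), (mip 2, layer 1)
// each of which is fanned out across the view's aspect mask bits by the
// templated SetLayout() overloads above.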
3474
3475// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3476// func_str is the name of the calling function
3477// Return false if no errors occur
3478// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3479static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, const std::string &func_str) {
3480    bool skip_call = false;
3481    auto set_node = my_data->setMap.find(set);
3482    if (set_node == my_data->setMap.end()) {
3483        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3484                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3485                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3486                             (uint64_t)(set));
3487    } else {
3488        if (set_node->second->in_use.load()) {
3489            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3490                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3491                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3492                                 func_str.c_str(), (uint64_t)(set));
3493        }
3494    }
3495    return skip_call;
3496}
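
// Usage sketch (hypothetical, compiled out; dev_data, count and
// pDescriptorSets are assumed to be the wrapper's locals/parameters): this
// helper is intended for entry points that retire sets, e.g. a
// vkFreeDescriptorSets() wrapper, before the call is passed down the chain:
#if 0
bool skip = false;
for (uint32_t i = 0; i < count; ++i) {
    skip |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
}
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT; // skip the driver call
#endif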
3497
3498// Remove set from setMap and delete the set
3499static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3500    dev_data->setMap.erase(descriptor_set->GetSet());
3501    delete descriptor_set;
3502}
3503// Free all DS Pools including their Sets & related sub-structs
3504// NOTE : Calls to this function should be wrapped in mutex
3505static void deletePools(layer_data *my_data) {
3506    if (my_data->descriptorPoolMap.empty())
3507        return;
3508    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3509        // Remove this pool's sets from setMap and delete them
3510        for (auto ds : (*ii).second->sets) {
3511            freeDescriptorSet(my_data, ds);
3512        }
3513        (*ii).second->sets.clear();
        delete (*ii).second; // also free the pool node itself so clearing the map below doesn't leak it
3514    }
3515    my_data->descriptorPoolMap.clear();
3516}
3517
3518static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3519                                VkDescriptorPoolResetFlags flags) {
3520    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3521    if (!pPool) {
3522        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3523                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
3524                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
3525    } else {
3526        // TODO: validate flags
3527        // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3528        for (auto ds : pPool->sets) {
3529            freeDescriptorSet(my_data, ds);
3530        }
3531        pPool->sets.clear();
3532        // Reset available count for each type and available sets for this pool
3533        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3534            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3535        }
3536        pPool->availableSets = pPool->maxSets;
3537    }
3538}
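
// Worked example (illustrative only): a pool created with maxSets == 4 and a
// maxDescriptorTypeCount of 8 for uniform buffers, from which 3 sets consuming
// 6 uniform-buffer descriptors were allocated, sits at availableSets == 1 and
// an available uniform-buffer count of 2. After clearDescriptorPool() the
// counters return to 4 and 8 and the pool's sets list is empty again.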
3539
3540// For given CB object, fetch associated CB Node from map
3541static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3542    auto it = my_data->commandBufferMap.find(cb);
3543    if (it == my_data->commandBufferMap.end()) {
3544        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3545                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3546                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3547        return NULL;
3548    }
3549    return it->second;
3550}
3551// Free all CB Nodes
3552// NOTE : Calls to this function should be wrapped in mutex
3553static void deleteCommandBuffers(layer_data *my_data) {
3554    if (my_data->commandBufferMap.empty()) {
3555        return;
3556    }
3557    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3558        delete (*ii).second;
3559    }
3560    my_data->commandBufferMap.clear();
3561}
3562
3563static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3564    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3565                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3566                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3567}
3568
3569bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3570    if (!pCB->activeRenderPass)
3571        return false;
3572    bool skip_call = false;
3573    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3574        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3575        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3576                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3577                             "Commands cannot be called in a subpass using secondary command buffers.");
3578    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3579        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3580                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3581                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3582    }
3583    return skip_call;
3584}
3585
3586static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3587    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3588        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3589                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3590                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3591    return false;
3592}
3593
3594static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3595    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3596        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3597                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3598                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3599    return false;
3600}
3601
3602static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3603    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3604        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3605                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3606                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
3607    return false;
3608}
3609
3610// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3611//  in the recording state or if there's an issue with the Cmd ordering
3612static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3613    bool skipCall = false;
3614    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
3615    if (pool_data != my_data->commandPoolMap.end()) {
3616        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
3617        switch (cmd) {
3618        case CMD_BINDPIPELINE:
3619        case CMD_BINDPIPELINEDELTA:
3620        case CMD_BINDDESCRIPTORSETS:
3621        case CMD_FILLBUFFER:
3622        case CMD_CLEARCOLORIMAGE:
3623        case CMD_SETEVENT:
3624        case CMD_RESETEVENT:
3625        case CMD_WAITEVENTS:
3626        case CMD_BEGINQUERY:
3627        case CMD_ENDQUERY:
3628        case CMD_RESETQUERYPOOL:
3629        case CMD_COPYQUERYPOOLRESULTS:
3630        case CMD_WRITETIMESTAMP:
3631            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3632            break;
3633        case CMD_SETVIEWPORTSTATE:
3634        case CMD_SETSCISSORSTATE:
3635        case CMD_SETLINEWIDTHSTATE:
3636        case CMD_SETDEPTHBIASSTATE:
3637        case CMD_SETBLENDSTATE:
3638        case CMD_SETDEPTHBOUNDSSTATE:
3639        case CMD_SETSTENCILREADMASKSTATE:
3640        case CMD_SETSTENCILWRITEMASKSTATE:
3641        case CMD_SETSTENCILREFERENCESTATE:
3642        case CMD_BINDINDEXBUFFER:
3643        case CMD_BINDVERTEXBUFFER:
3644        case CMD_DRAW:
3645        case CMD_DRAWINDEXED:
3646        case CMD_DRAWINDIRECT:
3647        case CMD_DRAWINDEXEDINDIRECT:
3648        case CMD_BLITIMAGE:
3649        case CMD_CLEARATTACHMENTS:
3650        case CMD_CLEARDEPTHSTENCILIMAGE:
3651        case CMD_RESOLVEIMAGE:
3652        case CMD_BEGINRENDERPASS:
3653        case CMD_NEXTSUBPASS:
3654        case CMD_ENDRENDERPASS:
3655            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3656            break;
3657        case CMD_DISPATCH:
3658        case CMD_DISPATCHINDIRECT:
3659            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3660            break;
3661        case CMD_COPYBUFFER:
3662        case CMD_COPYIMAGE:
3663        case CMD_COPYBUFFERTOIMAGE:
3664        case CMD_COPYIMAGETOBUFFER:
3665        case CMD_CLONEIMAGEDATA:
3666        case CMD_UPDATEBUFFER:
3667        case CMD_PIPELINEBARRIER:
3668        case CMD_EXECUTECOMMANDS:
3669        case CMD_END:
3670            break;
3671        default:
3672            break;
3673        }
3674    }
3675    if (pCB->state != CB_RECORDING) {
3676        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3677    } else {
3678        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3679        CMD_NODE cmdNode = {};
3680        // Init cmd node and append it to the end of the CB's command list
3681        cmdNode.cmdNumber = ++pCB->numCmds;
3682        cmdNode.type = cmd;
3683        pCB->cmds.push_back(cmdNode);
3684    }
3685    return skipCall;
3686}
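
// Usage sketch (hypothetical, compiled out; commandBuffer and the draw
// parameters are the entry point's arguments): each vkCmd* entry point records
// itself via addCmd() while holding the global lock, in the spirit of:
#if 0
GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
    skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw");
}
if (!skipCall)
    dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
#endif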
3687// Reset the command buffer state
3688//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3689static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3690    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3691    if (pCB) {
3692        pCB->in_use.store(0);
3693        pCB->cmds.clear();
3694        // Reset CB state (note that createInfo is not cleared)
3695        pCB->commandBuffer = cb;
3696        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3697        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3698        pCB->numCmds = 0;
3699        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3700        pCB->state = CB_NEW;
3701        pCB->submitCount = 0;
3702        pCB->status = 0;
3703        pCB->viewports.clear();
3704        pCB->scissors.clear();
3705
3706        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3707            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
3708            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
3709                set->RemoveBoundCommandBuffer(pCB);
3710            }
3711            pCB->lastBound[i].reset();
3712        }
3713
3714        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3715        pCB->activeRenderPass = nullptr;
3716        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3717        pCB->activeSubpass = 0;
3718        pCB->destroyedSets.clear();
3719        pCB->updatedSets.clear();
3720        pCB->destroyedFramebuffers.clear();
3721        pCB->waitedEvents.clear();
3722        pCB->events.clear();
3723        pCB->writeEventsBeforeWait.clear();
3724        pCB->waitedEventsBeforeQueryReset.clear();
3725        pCB->queryToStateMap.clear();
3726        pCB->activeQueries.clear();
3727        pCB->startedQueries.clear();
3728        pCB->imageSubresourceMap.clear();
3729        pCB->imageLayoutMap.clear();
3730        pCB->eventToStageMap.clear();
3731        pCB->drawData.clear();
3732        pCB->currentDrawData.buffers.clear();
3733        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3734        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3735        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3736            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3737        }
3738        pCB->secondaryCommandBuffers.clear();
3739        pCB->updateImages.clear();
3740        pCB->updateBuffers.clear();
3741        clear_cmd_buf_and_mem_references(dev_data, pCB);
3742        pCB->eventUpdates.clear();
3743        pCB->queryUpdates.clear();
3744
3745        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3746        for (auto framebuffer : pCB->framebuffers) {
3747            auto fbNode = getFramebuffer(dev_data, framebuffer);
3748            if (fbNode)
3749                fbNode->referencingCmdBuffers.erase(pCB->commandBuffer);
3750        }
3751        pCB->framebuffers.clear();
3752        pCB->activeFramebuffer = VK_NULL_HANDLE;
3753    }
3754}
3755
3756// Set PSO-related status bits for CB, including dynamic state set via PSO
3757static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
3758    // Account for any dynamic state not set via this PSO
3759    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3760        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3761        pCB->status = CBSTATUS_ALL;
3762    } else {
3763        // First consider all state on
3764        // Then unset any state that's noted as dynamic in PSO
3765        // Finally OR that into CB statemask
3766        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
3767        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3768            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3769            case VK_DYNAMIC_STATE_VIEWPORT:
3770                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
3771                break;
3772            case VK_DYNAMIC_STATE_SCISSOR:
3773                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
3774                break;
3775            case VK_DYNAMIC_STATE_LINE_WIDTH:
3776                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3777                break;
3778            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3779                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3780                break;
3781            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3782                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3783                break;
3784            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3785                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3786                break;
3787            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3788                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3789                break;
3790            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3791                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3792                break;
3793            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3794                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3795                break;
3796            default:
3797                // TODO : Flag error here
3798                break;
3799            }
3800        }
3801        pCB->status |= psoDynStateMask;
3802    }
3803}
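
// Worked example (illustrative only): binding a pipeline whose pDynamicState
// lists VK_DYNAMIC_STATE_VIEWPORT and VK_DYNAMIC_STATE_SCISSOR ORs
// (CBSTATUS_ALL & ~CBSTATUS_VIEWPORT_SET & ~CBSTATUS_SCISSOR_SET) into
// pCB->status: all static state counts as "set" immediately, while viewport
// and scissor must still be supplied via vkCmdSetViewport()/vkCmdSetScissor()
// before a draw is valid.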
3804
3805// Print the last bound Gfx Pipeline
3806static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
3807    bool skipCall = false;
3808    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3809    if (pCB) {
3810        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
3811        if (!pPipeTrav) {
3812            // nothing to print
3813        } else {
3814            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3815                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
3816                                vk_print_vkgraphicspipelinecreateinfo(
3817                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
3818                                    .c_str());
3819        }
3820    }
3821    return skipCall;
3822}
3823
3824static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
3825    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3826    if (pCB && !pCB->cmds.empty()) {
3827        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3828                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
3829        const vector<CMD_NODE> &cmds = pCB->cmds; // reference the list rather than copying it
3830        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
3831            // TODO : Need to pass cb as srcObj here
3832            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
3833                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
3834        }
3835    } else {
3836        // Nothing to print
3837    }
3838}
3839
3840static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
3841    bool skipCall = false;
3842    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
3843        return skipCall;
3844    }
3845    skipCall |= printPipeline(my_data, cb);
3846    return skipCall;
3847}
3848
3849// Flags validation error if the associated call is made inside a render pass. The apiName
3850// routine should ONLY be called outside a render pass.
3851static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3852    bool inside = false;
3853    if (pCB->activeRenderPass) {
3854        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3855                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
3856                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
3857                         (uint64_t)pCB->activeRenderPass->renderPass);
3858    }
3859    return inside;
3860}
3861
3862// Flags validation error if the associated call is made outside a render pass. The apiName
3863// routine should ONLY be called inside a render pass.
3864static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3865    bool outside = false;
3866    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3867        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3868         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3869        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3870                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
3871                          "%s: This call must be issued inside an active render pass.", apiName);
3872    }
3873    return outside;
3874}
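
// Usage sketch (hypothetical, compiled out): these two checks pair with the
// render-pass rules of each vkCmd* call, e.g.:
#if 0
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");   // transfer: valid outside a render pass only
skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed"); // draws: valid inside a render pass only
#endif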
3875
3876static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3877
3878    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3879
3880}
3881
3882VKAPI_ATTR VkResult VKAPI_CALL
3883CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
3884    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3885
3886    assert(chain_info->u.pLayerInfo);
3887    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3888    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3889    if (fpCreateInstance == NULL)
3890        return VK_ERROR_INITIALIZATION_FAILED;
3891
3892    // Advance the link info for the next element on the chain
3893    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3894
3895    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3896    if (result != VK_SUCCESS)
3897        return result;
3898
3899    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
3900    instance_data->instance = *pInstance;
3901    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
3902    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
3903
3904    instance_data->report_data =
3905        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
3906                                     pCreateInfo->ppEnabledExtensionNames);
3907
3908    init_core_validation(instance_data, pAllocator);
3909
3910    ValidateLayerOrdering(*pCreateInfo);
3911
3912    return result;
3913}
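
// Dispatch-chain note (illustrative): VK_LAYER_LINK_INFO is how the loader
// threads the layer stack together. Conceptually:
//   app -> [core_validation] -> [next layer] -> ... -> loader terminator/ICD
// Advancing chain_info->u.pLayerInfo before calling down ensures the next
// layer's CreateInstance sees its own link, and fpGetInstanceProcAddr resolves
// entry points of the next element in the chain rather than of this layer.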
3914
3915/* hook DestroyInstance to clean up and remove this instance's layer_data_map entry */
3916VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3917    // TODOSC : Shouldn't need any customization here
3918    dispatch_key key = get_dispatch_key(instance);
3919    // TBD: Need any locking this early, in case this function is called at the
3920    // same time by more than one thread?
3921    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
3922    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
3923    pTable->DestroyInstance(instance, pAllocator);
3924
3925    std::lock_guard<std::mutex> lock(global_lock);
3926    // Clean up any logging callbacks
3927    while (!my_data->logging_callback.empty()) {
3928        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
3929        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
3930        my_data->logging_callback.pop_back();
3931    }
3932
3933    layer_debug_report_destroy_instance(my_data->report_data);
3934    delete my_data->instance_dispatch_table;
3935    layer_data_map.erase(key);
3936}
3937
3938static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3939    uint32_t i;
3940    // TBD: Need any locking, in case this function is called at the same time
3941    // by more than one thread?
3942    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3943    dev_data->device_extensions.wsi_enabled = false;
3944
3945    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
3946    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
3947    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
3948    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
3949    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
3950    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
3951    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
3952
3953    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3954        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
3955            dev_data->device_extensions.wsi_enabled = true;
3956    }
3957}
3958
3959VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3960                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3961    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
3962    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3963
3964    assert(chain_info->u.pLayerInfo);
3965    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3966    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3967    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
3968    if (fpCreateDevice == NULL) {
3969        return VK_ERROR_INITIALIZATION_FAILED;
3970    }
3971
3972    // Advance the link info for the next element on the chain
3973    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3974
3975    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3976    if (result != VK_SUCCESS) {
3977        return result;
3978    }
3979
3980    std::unique_lock<std::mutex> lock(global_lock);
3981    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
3982
3983    // Setup device dispatch table
3984    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
3985    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
3986    my_device_data->device = *pDevice;
3987
3988    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
3989    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
3990    // Get physical device limits for this device
3991    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
3992    uint32_t count;
3993    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
3994    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
3995    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
3996        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
3997    // TODO: device limits should make sure these are compatible
3998    if (pCreateInfo->pEnabledFeatures) {
3999        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4000    } else {
4001        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4002    }
4003    // Store physical device mem limits into device layer_data struct
4004    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4005    lock.unlock();
4006
4007    ValidateLayerOrdering(*pCreateInfo);
4008
4009    return result;
4010}
4011
4012// Forward declaration (deleteRenderPasses is defined later in this file)
4013static void deleteRenderPasses(layer_data *);
4014VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4015    // TODOSC : Shouldn't need any customization here
4016    dispatch_key key = get_dispatch_key(device);
4017    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4018    // Free all the memory
4019    std::unique_lock<std::mutex> lock(global_lock);
4020    deletePipelines(dev_data);
4021    deleteRenderPasses(dev_data);
4022    deleteCommandBuffers(dev_data);
4023    // This will also delete all sets in the pool & remove them from setMap
4024    deletePools(dev_data);
4025    // All sets should be removed
4026    assert(dev_data->setMap.empty());
4027    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4028        delete del_layout.second;
4029    }
4030    dev_data->descriptorSetLayoutMap.clear();
4031    dev_data->imageViewMap.clear();
4032    dev_data->imageMap.clear();
4033    dev_data->imageSubresourceMap.clear();
4034    dev_data->imageLayoutMap.clear();
4035    dev_data->bufferViewMap.clear();
4036    dev_data->bufferMap.clear();
4037    // Queues persist until device is destroyed
4038    dev_data->queueMap.clear();
4039    lock.unlock();
4040#if MTMERGESOURCE
4041    bool skipCall = false;
4042    lock.lock();
4043    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4044            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4045    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4046            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4047    print_mem_list(dev_data);
4048    printCBList(dev_data);
4049    // Report any memory leaks
4050    DEVICE_MEM_INFO *pInfo = NULL;
4051    if (!dev_data->memObjMap.empty()) {
4052        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4053            pInfo = (*ii).second.get();
4054            if (pInfo->allocInfo.allocationSize != 0) {
4055                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4056                skipCall |=
4057                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4058                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4059                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4060                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4061                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4062            }
4063        }
4064    }
4065    layer_debug_report_destroy_device(device);
4066    lock.unlock();
4067
4068#if DISPATCH_MAP_DEBUG
4069    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4070#endif
4071    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4072    if (!skipCall) {
4073        pDisp->DestroyDevice(device, pAllocator);
4074    }
4075#else
4076    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4077#endif
4078    delete dev_data->device_dispatch_table;
4079    layer_data_map.erase(key);
4080}
4081
4082static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4083
4084// Validate that the initial layout each IMAGE had when this command buffer was
4085// recorded matches the current global layout of that IMAGE at submit time,
4086// then publish the command buffer's final layouts to the global map
4087static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4088    bool skip_call = false;
4089    for (auto cb_image_data : pCB->imageLayoutMap) {
4090        VkImageLayout imageLayout;
4091        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4092            skip_call |=
4093                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4094                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4095                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4096        } else {
4097            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4098                // TODO: Set memory invalid which is in mem_tracker currently
4099            } else if (imageLayout != cb_image_data.second.initialLayout) {
4100                if (cb_image_data.first.hasSubresource) {
4101                    skip_call |= log_msg(
4102                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4103                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4104                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4105                        "with layout %s when first use is %s.",
4106                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4107                        cb_image_data.first.subresource.arrayLayer,
4108                        cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4109                        string_VkImageLayout(cb_image_data.second.initialLayout));
4110                } else {
4111                    skip_call |= log_msg(
4112                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4113                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4114                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4115                        "first use is %s.",
4116                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4117                        string_VkImageLayout(cb_image_data.second.initialLayout));
4118                }
4119            }
4120            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4121        }
4122    }
4123    return skip_call;
4124}
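
// Worked example (illustrative only): suppose a command buffer was recorded
// assuming image I starts in TRANSFER_DST_OPTIMAL (its initialLayout in
// pCB->imageLayoutMap), but at submit time the global map says I is in
// SHADER_READ_ONLY_OPTIMAL. The mismatch produces the
// DRAWSTATE_INVALID_IMAGE_LAYOUT error above; an initialLayout of
// VK_IMAGE_LAYOUT_UNDEFINED is exempt, since it means the prior contents do
// not matter. On success, SetLayout() publishes the command buffer's final
// layouts to the global map so later submissions are validated against the
// post-submit state.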
4125
4126// Track which resources are in-flight by atomically incrementing their "in_use" count
4127static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
4128    bool skip_call = false;
4129    for (auto drawDataElement : pCB->drawData) {
4130        for (auto buffer : drawDataElement.buffers) {
4131            auto buffer_node = getBufferNode(my_data, buffer);
4132            if (!buffer_node) {
4133                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4134                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4135                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4136            } else {
4137                buffer_node->in_use.fetch_add(1);
4138            }
4139        }
4140    }
4141    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4142        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4143            if (!my_data->setMap.count(set->GetSet())) {
4144                skip_call |=
4145                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4146                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4147                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
4148            } else {
4149                set->in_use.fetch_add(1);
4150            }
4151        }
4152    }
4153    for (auto semaphore : semaphores) {
4154        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4155        if (semaphoreNode == my_data->semaphoreMap.end()) {
4156            skip_call |=
4157                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4158                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4159                        "Cannot submit cmd buffer using deleted semaphore 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(semaphore));
4160        } else {
4161            semaphoreNode->second.in_use.fetch_add(1);
4162        }
4163    }
4164    for (auto event : pCB->events) {
4165        auto eventNode = my_data->eventMap.find(event);
4166        if (eventNode == my_data->eventMap.end()) {
4167            skip_call |=
4168                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4169                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4170                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4171        } else {
4172            eventNode->second.in_use.fetch_add(1);
4173        }
4174    }
4175    for (auto event : pCB->writeEventsBeforeWait) {
4176        auto eventNode = my_data->eventMap.find(event);
4177        auto eventNode = my_data->eventMap.find(event);
4178        if (eventNode != my_data->eventMap.end()) eventNode->second.write_in_use++; // guard missing events, matching the decrement path
4179    return skip_call;
4180}
4181
4182// Note: This function assumes that the global lock is held by the calling
4183// thread.
4184static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4185    bool skip_call = false;
4186    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4187    if (pCB) {
4188        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4189            for (auto event : queryEventsPair.second) {
4190                if (my_data->eventMap[event].needsSignaled) {
4191                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4192                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4193                                         "Cannot get query results on queryPool 0x%" PRIx64
4194                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4195                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4196                }
4197            }
4198        }
4199    }
4200    return skip_call;
4201}
4202// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4203static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4204    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4205    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4206    pCB->in_use.fetch_sub(1);
4207    if (!pCB->in_use.load()) {
4208        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4209    }
4210}
4211
4212static void decrementResources(layer_data *my_data, CB_SUBMISSION *submission) {
4213    GLOBAL_CB_NODE *pCB = getCBNode(my_data, submission->cb);
4214    for (auto drawDataElement : pCB->drawData) {
4215        for (auto buffer : drawDataElement.buffers) {
4216            auto buffer_node = getBufferNode(my_data, buffer);
4217            if (buffer_node) {
4218                buffer_node->in_use.fetch_sub(1);
4219            }
4220        }
4221    }
4222    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4223        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4224            set->in_use.fetch_sub(1);
4225        }
4226    }
4227    for (auto semaphore : submission->semaphores) {
4228        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4229        if (semaphoreNode != my_data->semaphoreMap.end()) {
4230            semaphoreNode->second.in_use.fetch_sub(1);
4231        }
4232    }
4233    for (auto event : pCB->events) {
4234        auto eventNode = my_data->eventMap.find(event);
4235        if (eventNode != my_data->eventMap.end()) {
4236            eventNode->second.in_use.fetch_sub(1);
4237        }
4238    }
4239    for (auto event : pCB->writeEventsBeforeWait) {
4240        auto eventNode = my_data->eventMap.find(event);
4241        if (eventNode != my_data->eventMap.end()) {
4242            eventNode->second.write_in_use--;
4243        }
4244    }
4245    for (auto queryStatePair : pCB->queryToStateMap) {
4246        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4247    }
4248    for (auto eventStagePair : pCB->eventToStageMap) {
4249        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4250    }
4251}
4252// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4253//  decrementResources for all priorFences and cmdBuffers associated with fence.
4254static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4255    bool skip_call = false;
4256    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4257    for (uint32_t i = 0; i < fenceCount; ++i) {
4258        auto pFence = getFenceNode(my_data, pFences[i]);
4259        if (!pFence || !pFence->needsSignaled)
4260            return skip_call;
4261        pFence->needsSignaled = false;
4262        if (pFence->in_use.load()) {
4263            fence_pairs.emplace_back(pFences[i], pFence);
4264            pFence->in_use.fetch_sub(1);
4265        }
4266        decrementResources(my_data, static_cast<uint32_t>(pFence->priorFences.size()),
4267                           pFence->priorFences.data());
4268        for (auto & submission : pFence->submissions) {
4269            decrementResources(my_data, &submission);
4270            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4271            removeInFlightCmdBuffer(my_data, submission.cb);
4272        }
4273        pFence->submissions.clear();
4274        pFence->priorFences.clear();
4275    }
4276    for (auto fence_pair : fence_pairs) {
4277        for (auto queue : fence_pair.second->queues) {
4278            auto pQueue = getQueueNode(my_data, queue);
4279            if (pQueue) {
4280                auto last_fence_data =
4281                    std::find(pQueue->lastFences.begin(), pQueue->lastFences.end(), fence_pair.first);
4282                if (last_fence_data != pQueue->lastFences.end())
4283                    pQueue->lastFences.erase(last_fence_data);
4284            }
4285        }
4286        for (auto& fence_data : my_data->fenceMap) {
4287          auto prior_fence_data =
4288              std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first);
4289          if (prior_fence_data != fence_data.second.priorFences.end())
4290              fence_data.second.priorFences.erase(prior_fence_data);
4291        }
4292    }
4293    return skip_call;
4294}
4295// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4296static bool decrementResources(layer_data *my_data, VkQueue queue) {
4297    bool skip_call = false;
4298    auto queue_data = my_data->queueMap.find(queue);
4299    if (queue_data != my_data->queueMap.end()) {
4300        for (auto & submission : queue_data->second.untrackedSubmissions) {
4301            decrementResources(my_data, &submission);
4302            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4303            removeInFlightCmdBuffer(my_data, submission.cb);
4304        }
4305        queue_data->second.untrackedSubmissions.clear();
4306        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4307                                        queue_data->second.lastFences.data());
4308    }
4309    return skip_call;
4310}
4311
4312// This function merges command buffer tracking between queues when there is a semaphore dependency
4313// between them (see below for details as to how tracking works). When this happens, the prior
4314// fences from the signaling queue are merged into the wait queue as well as any untracked command
4315// buffers.
static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto fenceInner : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(fenceInner);
        auto fence_node = dev_data->fenceMap.find(fenceInner);
        if (fence_node != dev_data->fenceMap.end()) {
            // The merged fence now also lives on the waiting queue's lastFences list,
            // so record the waiting queue on the fence for later cleanup.
            fence_node->second.queues.insert(queue_data->first);
        }
    }
    // TODO: Stealing the untracked CBs out of the signaling queue isn't really
    // correct. A subsequent submission + wait, or a QWI on that queue, or
    // another semaphore dependency to a third queue may /all/ provide
    // suitable proof that the work we're stealing here has completed on the
    // device, but we've lost that information by moving the tracking between
    // queues.
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto &submission : other_queue_data->second.untrackedSubmissions) {
            fence_data->second.submissions.push_back(submission);
        }
        other_queue_data->second.untrackedSubmissions.clear();
    } else {
        for (auto &submission : other_queue_data->second.untrackedSubmissions) {
            queue_data->second.untrackedSubmissions.push_back(submission);
        }
        other_queue_data->second.untrackedSubmissions.clear();
    }
    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
    }
    for (auto queryStatePair : other_queue_data->second.queryToStateMap) {
        queue_data->second.queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
}

// This is the core scheme for tracking command buffers. There are two primary ways command
// buffers are tracked. When submitted, they are stored in the command buffer list associated
// with a fence, or in the untracked command buffer list associated with a queue if no fence is used.
// Each queue also stores the last fence that was submitted onto the queue. This allows us to
// create a linked list of fences and their associated command buffers, so if one fence is
// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
// waited on (either via a queue, device, or fence), we free the cmd buffers for that fence and
// recursively process the prior fences.
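//
// Illustrative sequence (hypothetical handles A, B, C and fences F1, F2) under this scheme:
//   vkQueueSubmit(q, {A});        // A -> q.untrackedSubmissions
//   vkQueueSubmit(q, {B}, F1);    // F1.submissions = {A, B}; q.lastFences = {F1}
//   vkQueueSubmit(q, {C}, F2);    // F2.submissions = {C}; F2.priorFences = {F1}; q.lastFences = {F2}
//   vkWaitForFences({F2});        // retires C, then recursively retires F1 and, with it, A and B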

// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void
SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence)
{
    assert(pFence->priorFences.empty());
    assert(pFence->submissions.empty());

    std::swap(pFence->priorFences, pQueue->lastFences);
    std::swap(pFence->submissions, pQueue->untrackedSubmissions);

    pFence->queues.insert(pQueue->queue);
    pFence->needsSignaled = true;
    pFence->in_use.fetch_add(1);

    pQueue->lastFences.push_back(pFence->fence);
}

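// Add every primary and secondary command buffer referenced by these submits to the
// global in-flight set, and bump each command buffer's in_use count.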
static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                       VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to the global set and increment count
                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
                if (!pCB) {
                    continue;
                }
                for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
                    pSubCB->in_use.fetch_add(1);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                pCB->in_use.fetch_add(1);
            }
        }
    }
}

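// Flag a command buffer that is already executing on the device but was not begun with
// VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT (e.g. the same CB submitted twice with no
// intervening wait).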
static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}

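// Validate a primary command buffer (and each secondary it recorded) at submit time:
// resource in-use tracking, re-binding of non-simultaneous-use secondaries, recorded
// state, and simultaneous-use rules.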
static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skipCall = validateAndIncrementResources(dev_data, pCB, semaphores);
    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            skipCall |= validateAndIncrementResources(dev_data, pSubCB, semaphores);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                            " but that buffer has subsequently been bound to "
                            "primary cmd buffer 0x%" PRIxLEAST64
                            " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                            reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }
    skipCall |= validateCommandBufferState(dev_data, pCB);
    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
    // on device
    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
    return skipCall;
}

static bool
ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
{
    bool skipCall = false;

    if (pFence) {
        if (pFence->in_use.load()) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        }

        if (!pFence->needsSignaled) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
                                reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skipCall;
}

VKAPI_ATTR VkResult VKAPI_CALL
QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueNode(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skipCall |= ValidateFenceForSubmit(dev_data, pFence);

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // TODO : Review these old print functions and clean up as appropriate
    print_mem_list(dev_data);
    printCBList(dev_data);

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence);
    }

    // If a fence is supplied, all the command buffers for this call will be
    // delimited by that fence. Otherwise, they go in the untracked portion of
    // the queue, and may end up being delimited by a fence supplied in a
    // subsequent submission.
    auto &submitTarget = pFence ? pFence->submissions : pQueue->untrackedSubmissions;

    // Now verify each individual submit
    std::unordered_set<VkQueue> processed_other_queues;
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<VkSemaphore> semaphoreList;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
                    processed_other_queues.insert(other_queue);
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                semaphoreList.push_back(semaphore);
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
                } else {
                    dev_data->semaphoreMap[semaphore].signaled = true;
                    dev_data->semaphoreMap[semaphore].queue = queue;
                }
            }
        }

        // TODO: just add one submission per VkSubmitInfo!
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (pCBNode) {
                skipCall |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
                submitTarget.emplace_back(pCBNode->commandBuffer, semaphoreList);
                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
                    submitTarget.emplace_back(secondaryCmdBuffer, semaphoreList);
                }

                pCBNode->submitCount++; // increment submit count
                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode, semaphoreList);
                // Call submit-time functions to validate/update state
                for (auto &function : pCBNode->validate_functions) {
                    skipCall |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skipCall |= function(queue);
                }
                for (auto &function : pCBNode->queryUpdates) {
                    skipCall |= function(queue);
                }
            }
        }
    }
    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
    lock.unlock();
    if (!skipCall)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // Only track the allocation if it actually succeeded
    if (VK_SUCCESS == result) {
        // TODO : Track allocations and overall size here
        std::lock_guard<std::mutex> lock(global_lock);
        add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
        print_mem_list(my_data);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device—for example by command buffers queued for execution. The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    std::unique_lock<std::mutex> lock(global_lock);
    freeMemObjInfo(my_data, device, mem, false);
    print_mem_list(my_data);
    printCBList(my_data);
    lock.unlock();
    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}

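// Validate a vkMapMemory request: the size must be nonzero, the object must not already
// be mapped, and [offset, offset + size) must fall within the allocation.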
static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skipCall = false;

    if (size == 0) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                           "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_info->memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_info->allocInfo.allocationSize, mem_info->allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->allocInfo.allocationSize) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
                            size + offset, mem_info->allocInfo.allocationSize);
            }
        }
    }
    return skipCall;
}

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->memRange.offset = offset;
        mem_info->memRange.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skipCall = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->memRange.size) {
            // Valid Usage: memory must currently be mapped
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
        mem_info->memRange.size = 0;
        if (mem_info->pData) {
            free(mem_info->pData);
            mem_info->pData = nullptr;
        }
    }
    return skipCall;
}

static const char NoncoherentMemoryFillValue = 0xb;

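// For mappings of non-coherent memory, hand the app a pointer into the middle of a
// double-sized shadow allocation pre-filled with NoncoherentMemoryFillValue; the fill
// bytes surrounding the mapped range act as guard bands intended to expose writes that
// stray outside the range the app actually mapped.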
static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->pDriverData = *ppData;
        uint32_t index = mem_info->allocInfo.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->pData = nullptr;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->allocInfo.allocationSize;
            }
            size_t convSize = (size_t)(size);
            mem_info->pData = malloc(2 * convSize);
            memset(mem_info->pData, NoncoherentMemoryFillValue, 2 * convSize);
            *ppData = static_cast<char *>(mem_info->pData) + (convSize / 2);
        }
    }
}
// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
//  it should have been submitted on a queue or during acquire next image
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skipCall = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (!pFence->firstTimeFlag) {
            if (!pFence->needsSignaled) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                            "%s specified fence 0x%" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
            }
            if (pFence->queues.empty() && !pFence->swapchain) { // Checking status of unsubmitted fence
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                    "acquire next image.",
                                    apiCall, reinterpret_cast<uint64_t &>(fence));
            }
        } else {
            pFence->firstTimeFlag = false;
        }
    }
    return skipCall;
}

VKAPI_ATTR VkResult VKAPI_CALL
WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            skip_call |= decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : The alternate case, where only some fences have completed, is not handled
        //  here. For the app to guarantee which fences completed, it will have to call
        //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
        lock.unlock();
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skipCall)
        return result;

    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        skipCall |= decrementResources(dev_data, 1, &fence);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                          VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->queue = *pQueue;
        pQNode->device = device;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    // decrementResources walks shared queue and fence state, so take the global
    // lock here just as DeviceWaitIdle does
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto queue : dev_data->queues) {
        skip_call |= decrementResources(dev_data, queue);
    }
    dev_data->globalInFlightCmdBuffers.clear();
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.in_use.load()) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence 0x%" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    lock.unlock();

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    auto item = dev_data->semaphoreMap.find(semaphore);
    if (item != dev_data->semaphoreMap.end()) {
        if (item->second.in_use.load()) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
        }
        dev_data->semaphoreMap.erase(semaphore);
    }
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_data = dev_data->eventMap.find(event);
    if (event_data != dev_data->eventMap.end()) {
        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
        }
        dev_data->eventMap.erase(event_data);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                   VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        auto pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    bool skip_call = false;
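    // For each requested query, classify it as available, unavailable, in flight, or
    // never written, and flag the combinations an application cannot legally read back.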
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                             (uint64_t)(queryPool), firstQuery + i);
                    } else {
                        for (auto event : queryEventElement->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
                // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    make_available |= pCB->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                         (uint64_t)(queryPool), firstQuery + i);
                }
                // Unavailable and not in flight
            } else if (!queryToStateElement->second) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
        } else {
            // Uninitialized: no state has ever been recorded for this query
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
                                                                flags);
}

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_node = getBufferNode(my_data, buffer);
    if (!buffer_node) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_node->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
                                     VkDebugReportObjectTypeEXT object_type) {
    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
                       other_handle);
    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
                       other_handle);
    }
}

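// Ranges are compared at bufferImageGranularity resolution: both endpoints are masked
// down to a granularity boundary, and any remaining overlap between a buffer range and
// an image range in the same allocation is reported as invalid aliasing.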
static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
                                  VkDebugReportObjectTypeEXT object_type) {
    bool skip_call = false;

    for (auto range : ranges) {
        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
            continue;
        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
            continue;
        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
    }
    return skip_call;
}

static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
    MEMORY_RANGE range;
    range.handle = handle;
    range.memory = mem;
    range.start = memoryOffset;
    range.end = memoryOffset + memRequirements.size - 1;
    ranges.push_back(range);
    return range;
}

static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
    for (size_t item = 0; item < ranges.size(); item++) {
        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
            ranges.erase(ranges.begin() + item);
            break;
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
        lock.unlock();
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
    }
    // Clean up memory binding and range information for buffer
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it != dev_data->bufferMap.end()) {
        auto mem_info = getMemObjInfo(dev_data, buff_it->second.get()->mem);
        if (mem_info) {
            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_it->second.get()->mem, mem_info->bufferRanges);
        }
        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        dev_data->bufferMap.erase(buff_it);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    auto item = dev_data->bufferViewMap.find(bufferView);
    if (item != dev_data->bufferViewMap.end()) {
        dev_data->bufferViewMap.erase(item);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    const auto &imageEntry = dev_data->imageMap.find(image);
    if (imageEntry != dev_data->imageMap.end()) {
        // Clean up memory mapping, bindings and range references for image
        auto mem_info = getMemObjInfo(dev_data, imageEntry->second.get()->mem);
        if (mem_info) {
            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.get()->mem, mem_info->imageRanges);
            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            mem_info->image = VK_NULL_HANDLE;
        }
        // Remove image from imageMap
        dev_data->imageMap.erase(imageEntry);
    }
    const auto &subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
    bool skip_call = false;
    if (((1 << mem_info->allocInfo.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
                            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                            "type index (0x%X) of this memory object 0x%" PRIx64 ".",
                            funcName, memory_type_bits, mem_info->allocInfo.memoryTypeIndex,
                            reinterpret_cast<const uint64_t &>(mem_info->mem));
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = (uint64_t)(buffer);
    bool skipCall =
        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        buffer_node->mem = mem;
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            const MEMORY_RANGE range =
                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, mem_info->bufferRanges);
            skipCall |= validate_memory_range(dev_data, mem_info->imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
                        memoryOffset, memRequirements.alignment);
        }
        // Validate device limits alignments
        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();
    if (!skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}
5322// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5323//  If this is a secondary command buffer, then make sure its primary is also in-flight
5324//  If primary is not in-flight, then remove secondary from global in-flight set
5325// This function is only valid at a point when cmdBuffer is being reset or freed
5326static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5327    bool skip_call = false;
5328    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5329        // Primary CB or secondary where primary is also in-flight is an error
5330        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5331            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5332            skip_call |= log_msg(
5333                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5334                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5335                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5336                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5337        } else { // Secondary CB w/o primary in-flight, remove from in-flight
5338            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5339        }
5340    }
5341    return skip_call;
5342}
5343// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5344static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) {
5345    bool skip_call = false;
5346    auto pool_data = dev_data->commandPoolMap.find(commandPool);
5347    if (pool_data != dev_data->commandPoolMap.end()) {
5348        for (auto cmd_buffer : pool_data->second.commandBuffers) {
5349            if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5350                skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5351            }
5352        }
5353    }
5354    return skip_call;
5355}
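// Illustrative, compiled-out sketch (not part of this layer) of how an application avoids
// the in-flight errors reported above: wait on the submission fence before freeing or
// resetting a command buffer. The queue/fence plumbing here is a placeholder assumption.
#if 0
static void ExampleWaitBeforeFree(VkDevice device, VkQueue queue, VkCommandPool pool, VkCommandBuffer cb,
                                  const VkSubmitInfo *pSubmit, VkFence fence) {
    vkQueueSubmit(queue, 1, pSubmit, fence);
    // Once the fence signals, the CB is retired from the global in-flight set
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
    vkFreeCommandBuffers(device, pool, 1, &cb); // now legal: the CB is no longer in flight
}
#endif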
5356
5357VKAPI_ATTR void VKAPI_CALL
5358FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5359    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5360
5361    bool skip_call = false;
5362    std::unique_lock<std::mutex> lock(global_lock);
5363    for (uint32_t i = 0; i < commandBufferCount; i++) {
5364        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
5365        // Delete CB information structure and remove from commandBufferMap; only dereference the iterator if CB is tracked
5366        if (cb_pair != dev_data->commandBufferMap.end()) {
5367            skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free");
5368            // reset prior to delete for data clean-up
5369            resetCB(dev_data, (*cb_pair).second->commandBuffer);
5370            delete (*cb_pair).second;
5371            dev_data->commandBufferMap.erase(cb_pair);
5372        }
5373
5374        // Remove commandBuffer reference from commandPoolMap
5375        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
5376    }
5377    printCBList(dev_data);
5378    lock.unlock();
5379
5380    if (!skip_call)
5381        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5382}
5383
5384VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5385                                                 const VkAllocationCallbacks *pAllocator,
5386                                                 VkCommandPool *pCommandPool) {
5387    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5388
5389    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5390
5391    if (VK_SUCCESS == result) {
5392        std::lock_guard<std::mutex> lock(global_lock);
5393        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5394        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5395    }
5396    return result;
5397}
5398
5399VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5400                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5401
5402    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5403    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5404    if (result == VK_SUCCESS) {
5405        std::lock_guard<std::mutex> lock(global_lock);
5406        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5407    }
5408    return result;
5409}
5410
5411// Destroy commandPool along with all of the commandBuffers allocated from that pool
5412VKAPI_ATTR void VKAPI_CALL
5413DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5414    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5415    std::unique_lock<std::mutex> lock(global_lock);
5416    // Verify that command buffers in pool are complete (not in-flight)
5417    bool in_use = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with");
5418    // Remove all of this pool's command buffers from commandBufferMap
5419    //  before erasing the pool itself from commandPoolMap
5420    auto pool_it = dev_data->commandPoolMap.find(commandPool);
5421    if (pool_it != dev_data->commandPoolMap.end()) {
5422        for (auto cb : pool_it->second.commandBuffers) {
5423            clear_cmd_buf_and_mem_references(dev_data, cb);
5424            auto del_cb = dev_data->commandBufferMap.find(cb);
5425            delete del_cb->second;                  // delete CB info structure
5426            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
5427        }
5428    }
5429    dev_data->commandPoolMap.erase(commandPool);
5430
5431    lock.unlock();
5432
5433    // Skip the down-chain destroy if any command buffer
5434    //  in this pool was still in use
5435
5436    if (!in_use)
5437        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5438}
5439
5440VKAPI_ATTR VkResult VKAPI_CALL
5441ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5442    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5443    // Fail the reset up-front if any command buffer in this pool is still in flight;
5444    //  checkAndClearCommandBuffersInFlight() logs the specific offending command buffers
5445    if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with"))
5446        return VK_ERROR_VALIDATION_FAILED_EXT;
5447
5448    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5451
5452    // Reset all of the CBs allocated from this pool
5453    if (VK_SUCCESS == result) {
5454        std::lock_guard<std::mutex> lock(global_lock);
5455        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
5456        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
5457            resetCB(dev_data, (*it));
5458            ++it;
5459        }
5460    }
5461    return result;
5462}
5463
5464VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5465    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5466    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5467    bool skipCall = false;
5468    std::unique_lock<std::mutex> lock(global_lock);
5469    for (uint32_t i = 0; i < fenceCount; ++i) {
5470        auto pFence = getFenceNode(dev_data, pFences[i]);
5471        if (pFence) {
5472            pFence->needsSignaled = true;
5473            pFence->queues.clear();
5474            pFence->priorFences.clear();
5475            if (pFence->in_use.load()) {
5476                skipCall |=
5477                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5478                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5479                            "Fence 0x%" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
5480            }
5481        }
5482    }
5483    lock.unlock();
5484    if (!skipCall)
5485        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5486    return result;
5487}
5488
5489VKAPI_ATTR void VKAPI_CALL
5490DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5491    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5492    std::unique_lock<std::mutex> lock(global_lock);
5493    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
5494    if (fbNode != dev_data->frameBufferMap.end()) {
5495        for (auto cb : fbNode->second.referencingCmdBuffers) {
5496            auto cbNode = dev_data->commandBufferMap.find(cb);
5497            if (cbNode != dev_data->commandBufferMap.end()) {
5498                // Set CB as invalid and record destroyed framebuffer
5499                cbNode->second->state = CB_INVALID;
5500                cbNode->second->destroyedFramebuffers.insert(framebuffer);
5501            }
5502        }
5503        delete [] fbNode->second.createInfo.pAttachments;
5504        dev_data->frameBufferMap.erase(fbNode);
5505    }
5506    lock.unlock();
5507    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
5508}
5509
5510VKAPI_ATTR void VKAPI_CALL
5511DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5512    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5513    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
5514    std::lock_guard<std::mutex> lock(global_lock);
5515    dev_data->renderPassMap.erase(renderPass);
5516}
5517
5518VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5519                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5520    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5521
5522    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5523
5524    if (VK_SUCCESS == result) {
5525        std::lock_guard<std::mutex> lock(global_lock);
5526        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when we want that data to be valid
5527        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(pCreateInfo))));
5528    }
5529    return result;
5530}
5531
5532VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5533                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5534    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5535    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
5536    if (VK_SUCCESS == result) {
5537        std::lock_guard<std::mutex> lock(global_lock);
5538        dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo));
5539        // In order to create a valid buffer view, the buffer must have been created with at least one of the
5540        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
5541        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
5542                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
5543                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
5544    }
5545    return result;
5546}
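// Compiled-out sketch (not part of this layer) of a buffer creation that satisfies the
// usage check above; the size and sharing mode are placeholder assumptions.
#if 0
static VkResult ExampleTexelBufferForView(VkDevice device, VkBuffer *pBuffer) {
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.size = 4096;                                      // placeholder size
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // required before creating a uniform texel buffer view
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return vkCreateBuffer(device, &buf_info, NULL, pBuffer);
}
#endif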
5547
5548VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5549                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5550    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5551
5552    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
5553
5554    if (VK_SUCCESS == result) {
5555        std::lock_guard<std::mutex> lock(global_lock);
5556        IMAGE_LAYOUT_NODE image_node;
5557        image_node.layout = pCreateInfo->initialLayout;
5558        image_node.format = pCreateInfo->format;
5559        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pCreateInfo))));
5560        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
5561        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
5562        dev_data->imageLayoutMap[subpair] = image_node;
5563    }
5564    return result;
5565}
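// Example of the layout tracking above: an image created with
// initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED is recorded in imageLayoutMap at that
// layout, giving later barrier/transition validation a baseline to compare oldLayout against.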
5566
5567static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
5568    /* expects global_lock to be held by caller */
5569
5570    auto image_node = getImageNode(dev_data, image);
5571    if (image_node) {
5572        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
5573         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
5574         * the actual values.
5575         */
5576        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
5577            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
5578        }
5579
5580        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
5581            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
5582        }
5583    }
5584}
5585
5586// Return the correct layer/level counts if the caller used the special
5587// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
5588static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
5589                                         VkImage image) {
5590    /* expects global_lock to be held by caller */
5591
5592    *levels = range.levelCount;
5593    *layers = range.layerCount;
5594    auto image_node = getImageNode(dev_data, image);
5595    if (image_node) {
5596        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
5597            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
5598        }
5599        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
5600            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
5601        }
5602    }
5603}
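// Worked example of the resolution above: for an image created with mipLevels = 10 and
// arrayLayers = 6, a range with baseMipLevel = 3, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 2, layerCount = VK_REMAINING_ARRAY_LAYERS resolves to
// levelCount = 10 - 3 = 7 and layerCount = 6 - 2 = 4.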
5604
5605VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
5606                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
5607    bool skipCall = false;
5608    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5609    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5610    {
5611        // Validate that img has correct usage flags set
5612        std::lock_guard<std::mutex> lock(global_lock);
5613        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
5614                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
5615                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5616                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
5617    }
5618
5619    if (!skipCall) {
5620        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
5621    }
5622
5623    if (VK_SUCCESS == result) {
5624        std::lock_guard<std::mutex> lock(global_lock);
5625        dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
5626        ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image);
5627    }
5628
5629    return result;
5630}
5631
5632VKAPI_ATTR VkResult VKAPI_CALL
5633CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
5634    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5635    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
5636    if (VK_SUCCESS == result) {
5637        std::lock_guard<std::mutex> lock(global_lock);
5638        auto &fence_node = dev_data->fenceMap[*pFence];
5639        fence_node.fence = *pFence;
5640        fence_node.createInfo = *pCreateInfo;
5641        fence_node.needsSignaled = true;
5642        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5643            fence_node.firstTimeFlag = true;
5644            fence_node.needsSignaled = false;
5645        }
5646        fence_node.in_use.store(0);
5647    }
5648    return result;
5649}
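// Example of the state above: a fence created with VK_FENCE_CREATE_SIGNALED_BIT starts
// life signaled (needsSignaled = false), so an application may vkWaitForFences() on it
// immediately; an unsignaled fence (needsSignaled = true) must first be submitted with work.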
5650
5651// TODO handle pipeline caches
5652VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
5653                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
5654    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5655    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
5656    return result;
5657}
5658
5659VKAPI_ATTR void VKAPI_CALL
5660DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
5661    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5662    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
5663}
5664
5665VKAPI_ATTR VkResult VKAPI_CALL
5666GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
5667    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5668    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
5669    return result;
5670}
5671
5672VKAPI_ATTR VkResult VKAPI_CALL
5673MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
5674    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5675    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
5676    return result;
5677}
5678
5679// utility function to set collective state for pipeline
5680void set_pipeline_state(PIPELINE_NODE *pPipe) {
5681    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
5682    if (pPipe->graphicsPipelineCI.pColorBlendState) {
5683        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
5684            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
5685                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5686                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5687                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5688                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5689                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5690                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5691                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5692                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
5693                    pPipe->blendConstantsEnabled = true;
5694                }
5695            }
5696        }
5697    }
5698}
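// Example of the rule above: an attachment with blendEnable = VK_TRUE and, say,
// srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR (any factor in the
// CONSTANT_COLOR..ONE_MINUS_CONSTANT_ALPHA range) sets blendConstantsEnabled, which
// draw-time validation can then use to require that blend constants were set on the CB.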
5699
5700VKAPI_ATTR VkResult VKAPI_CALL
5701CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5702                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5703                        VkPipeline *pPipelines) {
5704    VkResult result = VK_SUCCESS;
5705    // TODO What to do with pipelineCache?
5706    // The order of operations here is a little convoluted but gets the job done
5707    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
5708    //  2. Create state is then validated (which uses flags setup during shadowing)
5709    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
5710    bool skipCall = false;
5711    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5712    vector<PIPELINE_NODE *> pPipeNode(count);
5713    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5714
5715    uint32_t i = 0;
5716    std::unique_lock<std::mutex> lock(global_lock);
5717
5718    for (i = 0; i < count; i++) {
5719        pPipeNode[i] = new PIPELINE_NODE;
5720        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
5721        pPipeNode[i]->renderPass = getRenderPass(dev_data, pCreateInfos[i].renderPass);
5722        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5723
5724        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
5725    }
5726
5727    if (!skipCall) {
5728        lock.unlock();
5729        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5730                                                                          pPipelines);
5731        lock.lock();
5732        for (i = 0; i < count; i++) {
5733            pPipeNode[i]->pipeline = pPipelines[i];
5734            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5735        }
5736        lock.unlock();
5737    } else {
5738        for (i = 0; i < count; i++) {
5739            delete pPipeNode[i];
5740        }
5741        lock.unlock();
5742        return VK_ERROR_VALIDATION_FAILED_EXT;
5743    }
5744    return result;
5745}
5746
5747VKAPI_ATTR VkResult VKAPI_CALL
5748CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5749                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5750                       VkPipeline *pPipelines) {
5751    VkResult result = VK_SUCCESS;
5752    bool skipCall = false;
5753
5754    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5755    vector<PIPELINE_NODE *> pPipeNode(count);
5756    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5757
5758    uint32_t i = 0;
5759    std::unique_lock<std::mutex> lock(global_lock);
5760    for (i = 0; i < count; i++) {
5761        // TODO: Verify compute stage bits
5762
5763        // Create and initialize internal tracking data structure
5764        pPipeNode[i] = new PIPELINE_NODE;
5765        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
5766        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5767        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
5768
5769        // TODO: Add Compute Pipeline Verification
5770        skipCall |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i],
5771                                               &dev_data->phys_dev_properties.features,
5772                                               dev_data->shaderModuleMap);
5773        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
5774    }
5775
5776    if (!skipCall) {
5777        lock.unlock();
5778        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5779                                                                         pPipelines);
5780        lock.lock();
5781        for (i = 0; i < count; i++) {
5782            pPipeNode[i]->pipeline = pPipelines[i];
5783            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5784        }
5785        lock.unlock();
5786    } else {
5787        for (i = 0; i < count; i++) {
5788            // Clean up any locally allocated data structures
5789            delete pPipeNode[i];
5790        }
5791        lock.unlock();
5792        return VK_ERROR_VALIDATION_FAILED_EXT;
5793    }
5794    return result;
5795}
5796
5797VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
5798                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
5799    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5800    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
5801    if (VK_SUCCESS == result) {
5802        std::lock_guard<std::mutex> lock(global_lock);
5803        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
5804    }
5805    return result;
5806}
5807
5808VKAPI_ATTR VkResult VKAPI_CALL
5809CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5810                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
5811    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5812    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5813    if (VK_SUCCESS == result) {
5814        // TODOSC : Capture layout bindings set
5815        std::lock_guard<std::mutex> lock(global_lock);
5816        dev_data->descriptorSetLayoutMap[*pSetLayout] =
5817            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
5818    }
5819    return result;
5820}
5821
5822// Used by CreatePipelineLayout and CmdPushConstants.
5823// Note that the index argument is optional and only used by CreatePipelineLayout.
5824static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5825                                      const char *caller_name, uint32_t index = 0) {
5826    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5827    bool skipCall = false;
5828    // Check that offset + size don't exceed the max.
5829    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
5830    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5831        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5832        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5833            skipCall |=
5834                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5835                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
5836                                                              "exceeds this device's maxPushConstantsSize of %u.",
5837                        caller_name, index, offset, size, maxPushConstantsSize);
5838        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5839            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5840                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
5841                                                                      "exceeds this device's maxPushConstantSize of %u.",
5842                                                                      "exceeds this device's maxPushConstantsSize of %u.",
5843        } else {
5844            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5845                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5846        }
5847    }
5848    // size needs to be non-zero and a multiple of 4.
5849    if ((size == 0) || ((size & 0x3) != 0)) {
5850        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5851            skipCall |=
5852                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5853                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5854                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5855                        caller_name, index, size);
5856        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5857            skipCall |=
5858                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5859                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5860                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5861                        caller_name, size);
5862        } else {
5863            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5864                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5865        }
5866    }
5867    // offset needs to be a multiple of 4.
5868    if ((offset & 0x3) != 0) {
5869        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5870            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5871                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5872                                                                      "offset %u. Offset must be a multiple of 4.",
5873                                caller_name, index, offset);
5874        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5875            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5876                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5877                                                                      "offset %u. Offset must be a multiple of 4.",
5878                                caller_name, offset);
5879        } else {
5880            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5881                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5882        }
5883    }
5884    return skipCall;
5885}
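// Worked examples against the checks above, assuming maxPushConstantsSize = 128:
//   offset = 0,   size = 128 -> valid (0 + 128 == 128; both are multiples of 4)
//   offset = 124, size = 8   -> error (8 > 128 - 124; the range overruns the limit)
//   offset = 2,   size = 4   -> error (offset is not a multiple of 4)
//   offset = 4,   size = 6   -> error (size is not a multiple of 4)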
5886
5887VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5888                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5889    bool skipCall = false;
5890    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5891    // Push Constant Range checks
5892    uint32_t i = 0;
5893    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5894        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5895                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5896        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5897            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5898                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
5899        }
5900    }
5901    // Each range has been validated individually.  Now check for overlap between ranges.
5902    if (!skipCall) {
5903        uint32_t i, j;
5904        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5905            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5906                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
5907                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
5908                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
5909                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
5910                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
5911                    skipCall |=
5912                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5913                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
5914                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
5915                                i, minA, maxA, j, minB, maxB);
5916                }
5917            }
5918        }
5919    }
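    // Example of the overlap warning above: ranges 0:[0, 16) and 1:[8, 24) overlap
    // because minA(0) <= minB(8) while maxA(16) > minB(8).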
5920
5921    if (skipCall)
5922        return VK_ERROR_VALIDATION_FAILED_EXT;
5923
5924    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5925    if (VK_SUCCESS == result) {
5926        std::lock_guard<std::mutex> lock(global_lock);
5927        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5928        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
5929        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
5930        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5931            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
5932            plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5933        }
5934        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
5935        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5936            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
5937        }
5938    }
5939    return result;
5940}
5941
5942VKAPI_ATTR VkResult VKAPI_CALL
5943CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
5944                     VkDescriptorPool *pDescriptorPool) {
5945    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5946    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5947    if (VK_SUCCESS == result) {
5948        // Record the new pool: log creation, then add a DESCRIPTOR_POOL_NODE to descriptorPoolMap
5949        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5950                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
5951                    (uint64_t)*pDescriptorPool))
5952            return VK_ERROR_VALIDATION_FAILED_EXT;
5953        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
5954        if (NULL == pNewNode) {
5955            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5956                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
5957                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
5958                return VK_ERROR_VALIDATION_FAILED_EXT;
5959        } else {
5960            std::lock_guard<std::mutex> lock(global_lock);
5961            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5962        }
5963    } else {
5964        // Need to do anything if pool create fails?
5965    }
5966    return result;
5967}
5968
5969VKAPI_ATTR VkResult VKAPI_CALL
5970ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
5971    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5972    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
5973    if (VK_SUCCESS == result) {
5974        std::lock_guard<std::mutex> lock(global_lock);
5975        clearDescriptorPool(dev_data, device, descriptorPool, flags);
5976    }
5977    return result;
5978}
5979// Ensure the pool contains enough descriptors and descriptor sets to satisfy
5980// an allocation request. Fills common_data with the total number of descriptors of each type required,
5981// as well as DescriptorSetLayout ptrs used for later update.
5982static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5983                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5984    // All state checks for AllocateDescriptorSets is done in single function
5985    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
5986}
5987// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
5988static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5989                                                 VkDescriptorSet *pDescriptorSets,
5990                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5991    // All the updates are contained in a single cvdescriptorset function
5992    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
5993                                                   &dev_data->setMap, dev_data);
5994}
5995
5996VKAPI_ATTR VkResult VKAPI_CALL
5997AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
5998    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5999    std::unique_lock<std::mutex> lock(global_lock);
6000    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6001    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6002    lock.unlock();
6003
6004    if (skip_call)
6005        return VK_ERROR_VALIDATION_FAILED_EXT;
6006
6007    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6008
6009    if (VK_SUCCESS == result) {
6010        lock.lock();
6011        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6012        lock.unlock();
6013    }
6014    return result;
6015}
6016// Verify state before freeing DescriptorSets
6017static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6018                                              const VkDescriptorSet *descriptor_sets) {
6019    bool skip_call = false;
6020    // First make sure sets being destroyed are not currently in-use
6021    for (uint32_t i = 0; i < count; ++i)
6022        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6023
6024    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6025    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6026        // Can't Free from a NON_FREE pool
6027        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6028                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6029                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6030                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6031    }
6032    return skip_call;
6033}
6034// Sets have been removed from the pool so update underlying state
6035static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6036                                             const VkDescriptorSet *descriptor_sets) {
6037    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6038    // Update available descriptor sets in pool
6039    pool_state->availableSets += count;
6040
6041    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6042    for (uint32_t i = 0; i < count; ++i) {
6043        auto set_state = dev_data->setMap[descriptor_sets[i]];
6044        uint32_t type_index = 0, descriptor_count = 0;
6045        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6046            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6047            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6048            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6049        }
6050        freeDescriptorSet(dev_data, set_state);
6051        pool_state->sets.erase(set_state);
6052    }
6053}
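// Worked example of the accounting above: freeing one set whose layout has bindings
// {UNIFORM_BUFFER x2, COMBINED_IMAGE_SAMPLER x1} increments availableSets by 1 and adds
// 2 and 1 back to the pool's availableDescriptorTypeCount entries for those two types.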
6054
6055VKAPI_ATTR VkResult VKAPI_CALL
6056FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6057    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6058    // Make sure that no sets being destroyed are in-flight
6059    std::unique_lock<std::mutex> lock(global_lock);
6060    bool skipCall = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6061    lock.unlock();
6062    if (skipCall)
6063        return VK_ERROR_VALIDATION_FAILED_EXT;
6064    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6065    if (VK_SUCCESS == result) {
6066        lock.lock();
6067        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6068        lock.unlock();
6069    }
6070    return result;
6071}
6072// TODO : This is a Proof-of-concept for core validation architecture
6073//  Really we'll want to break out these functions to separate files but
6074//  keeping it all together here to prove out design
6075// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6076static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6077                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6078                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6079    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
6080    //  so we can't do a single map look-up up-front; the look-ups happen individually in the functions below
6082
6083    // Now make call(s) that validate state, but don't perform state updates in this function
6084    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
6085    //  namespace which will parse params and make calls into specific class instances
6086    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6087                                                         descriptorCopyCount, pDescriptorCopies);
6088}
6089// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6090static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6091                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6092                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6093    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6094                                                 pDescriptorCopies);
6095}
6096
6097VKAPI_ATTR void VKAPI_CALL
6098UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6099                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6100    // Only map look-up at top level is for device-level layer_data
6101    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6102    std::unique_lock<std::mutex> lock(global_lock);
6103    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6104                                                         pDescriptorCopies);
6105    lock.unlock();
6106    if (!skip_call) {
6107        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6108                                                              pDescriptorCopies);
6109        lock.lock();
6110        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6111        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6112                                           pDescriptorCopies);
6113    }
6114}
6115
6116VKAPI_ATTR VkResult VKAPI_CALL
6117AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6118    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6119    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6120    if (VK_SUCCESS == result) {
6121        std::unique_lock<std::mutex> lock(global_lock);
6122        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6123        if (cp_it != dev_data->commandPoolMap.end()) {
6124            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6125                // Add command buffer to its commandPool map
6126                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6127                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6128                // Add command buffer to map
6129                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6130                resetCB(dev_data, pCommandBuffer[i]);
6131                pCB->createInfo = *pCreateInfo;
6132                pCB->device = device;
6133            }
6134        }
6135        printCBList(dev_data);
6136        lock.unlock();
6137    }
6138    return result;
6139}
6140
6141VKAPI_ATTR VkResult VKAPI_CALL
6142BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6143    bool skipCall = false;
6144    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6145    std::unique_lock<std::mutex> lock(global_lock);
6146    // Validate command buffer level
6147    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6148    if (pCB) {
6149        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6150        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6151            skipCall |=
6152                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6153                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6154                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6155                        "You must check CB fence before this call.",
6156                        commandBuffer);
6157        }
6158        clear_cmd_buf_and_mem_references(dev_data, pCB);
6159        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6160            // Secondary Command Buffer
6161            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6162            if (!pInfo) {
6163                skipCall |=
6164                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6165                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6166                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6167                            reinterpret_cast<void *>(commandBuffer));
6168            } else {
6169                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6170                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6171                        skipCall |= log_msg(
6172                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6173                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6174                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6175                            reinterpret_cast<void *>(commandBuffer));
6176                    }
6177                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6178                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6179                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6180                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6181                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6182                                                  "valid framebuffer parameter is specified.",
6183                                            reinterpret_cast<void *>(commandBuffer));
6184                    } else {
6185                        string errorString = "";
6186                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6187                        if (framebuffer) {
6188                            VkRenderPass fbRP = framebuffer->createInfo.renderPass;
6189                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6190                                // renderPass that framebuffer was created with must be compatible with local renderPass
6191                                skipCall |=
6192                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6193                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6194                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6195                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6196                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6197                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6198                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6199                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6200                            }
6201                            // Connect this framebuffer to this cmdBuffer
6202                            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
6203                        }
6204                    }
6205                }
6206                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6207                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6208                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6209                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6210                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6211                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6212                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6213                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
6214                                        "support precise occlusion queries.",
6215                                        reinterpret_cast<void *>(commandBuffer));
6216                }
6217            }
6218            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6219                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6220                if (renderPass) {
6221                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6222                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6223                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6224                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6225                                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
6226                                            "that is less than the number of subpasses (%d).",
6227                                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6228                    }
6229                }
6230            }
6231        }
6232        if (CB_RECORDING == pCB->state) {
6233            skipCall |=
6234                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6235                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6236                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6237                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6238                        (uint64_t)commandBuffer);
6239        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && !pCB->cmds.empty() && CMD_END == pCB->cmds.back().type)) {
6240            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6241            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6242                skipCall |=
6243                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6244                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6245                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6246                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6247                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6248                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6249            }
6250            resetCB(dev_data, commandBuffer);
6251        }
6252        // Set updated state here in case implicit reset occurs above
6253        pCB->state = CB_RECORDING;
6254        pCB->beginInfo = *pBeginInfo;
6255        if (pCB->beginInfo.pInheritanceInfo) {
6256            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6257            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6258            // If we are a secondary command buffer inheriting state, update the items we should inherit.
6259            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6260                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6261                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
6262                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6263                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6264            }
6265        }
6266    } else {
6267        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6268                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6269                            "In vkBeginCommandBuffer(): unable to find CommandBuffer node for CB 0x%p!", (void *)commandBuffer);
6270    }
6271    lock.unlock();
6272    if (skipCall) {
6273        return VK_ERROR_VALIDATION_FAILED_EXT;
6274    }
6275    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6276
6277    return result;
6278}
6279
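/* Example (illustrative sketch, not part of this layer): to satisfy the
 * occlusion-query check above, a secondary command buffer that sets
 * VK_QUERY_CONTROL_PRECISE_BIT must also enable occlusionQueryEnable in its
 * inheritance info, and the device must support precise occlusion queries.
 * Names such as secondary_cb are placeholders.
 *
 *     VkCommandBufferInheritanceInfo inherit = {};
 *     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
 *     inherit.occlusionQueryEnable = VK_TRUE;             // required for PRECISE
 *     inherit.queryFlags = VK_QUERY_CONTROL_PRECISE_BIT;
 *
 *     VkCommandBufferBeginInfo begin = {};
 *     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
 *     begin.pInheritanceInfo = &inherit;
 *     vkBeginCommandBuffer(secondary_cb, &begin);
 */
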
6280VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6281    bool skipCall = false;
6282    VkResult result = VK_SUCCESS;
6283    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6284    std::unique_lock<std::mutex> lock(global_lock);
6285    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6286    if (pCB) {
6287        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6288            // This needs spec clarification to update valid usage, see comments in PR:
6289            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6290            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6291        }
6292        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6293        for (auto query : pCB->activeQueries) {
6294            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6295                                DRAWSTATE_INVALID_QUERY, "DS",
6296                                "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
6297                                (uint64_t)(query.pool), query.index);
6298        }
6299    }
6300    if (!skipCall) {
6301        lock.unlock();
6302        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6303        lock.lock();
6304        if (VK_SUCCESS == result) {
6305            pCB->state = CB_RECORDED;
6306            // Reset CB status flags
6307            pCB->status = 0;
6308            printCB(dev_data, commandBuffer);
6309        }
6310    } else {
6311        result = VK_ERROR_VALIDATION_FAILED_EXT;
6312    }
6313    lock.unlock();
6314    return result;
6315}
6316
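/* Example sketch (not part of this layer): the in-progress-query check above
 * fires when a query is still active at vkEndCommandBuffer() time; every
 * vkCmdBeginQuery() must be paired with vkCmdEndQuery() first. cb and
 * query_pool are placeholder names.
 *
 *     vkCmdBeginQuery(cb, query_pool, 0, 0);
 *     // ... draw calls ...
 *     vkCmdEndQuery(cb, query_pool, 0);   // must precede vkEndCommandBuffer
 *     vkEndCommandBuffer(cb);
 */
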
6317VKAPI_ATTR VkResult VKAPI_CALL
6318ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6319    bool skip_call = false;
6320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6321    std::unique_lock<std::mutex> lock(global_lock);
6322    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6323    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6324    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6325        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6326                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6327                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6328                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6329                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6330    }
6331    skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset");
6332    lock.unlock();
6333    if (skip_call)
6334        return VK_ERROR_VALIDATION_FAILED_EXT;
6335    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6336    if (VK_SUCCESS == result) {
6337        lock.lock();
6338        resetCB(dev_data, commandBuffer);
6339        lock.unlock();
6340    }
6341    return result;
6342}
6343
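/* Example sketch (not part of this layer): explicit (and implicit) command
 * buffer resets require the owning pool to have been created with the RESET
 * bit, as checked above. device, queue_family_index, pool, and cb are
 * placeholders.
 *
 *     VkCommandPoolCreateInfo pool_ci = {};
 *     pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
 *     pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
 *     pool_ci.queueFamilyIndex = queue_family_index;
 *     vkCreateCommandPool(device, &pool_ci, nullptr, &pool);
 *     // Command buffers allocated from 'pool' may now be reset individually:
 *     vkResetCommandBuffer(cb, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
 */
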
6344VKAPI_ATTR void VKAPI_CALL
6345CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6346    bool skipCall = false;
6347    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6348    std::unique_lock<std::mutex> lock(global_lock);
6349    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6350    if (pCB) {
6351        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6352        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6353            skipCall |=
6354                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6355                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6356                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6357                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6358        }
6359
6360        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6361        if (pPN) {
6362            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6363            set_cb_pso_status(pCB, pPN);
6364            set_pipeline_state(pPN);
6365        } else {
6366            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6367                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6368                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6369        }
6370    }
6371    lock.unlock();
6372    if (!skipCall)
6373        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6374}
6375
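/* Example sketch (not part of this layer): the render-pass check above flags
 * a compute bind recorded inside an active render pass; bind compute
 * pipelines outside the pass. cb, rp_begin, and compute_pipe are placeholders.
 *
 *     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipe); // OK here
 *     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
 *     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipe); // flagged
 *     vkCmdEndRenderPass(cb);
 */
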
6376VKAPI_ATTR void VKAPI_CALL
6377CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6378    bool skipCall = false;
6379    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6380    std::unique_lock<std::mutex> lock(global_lock);
6381    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6382    if (pCB) {
6383        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6384        pCB->status |= CBSTATUS_VIEWPORT_SET;
6385        pCB->viewports.resize(viewportCount);
6386        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6387    }
6388    lock.unlock();
6389    if (!skipCall)
6390        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6391}
6392
6393VKAPI_ATTR void VKAPI_CALL
6394CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6395    bool skipCall = false;
6396    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6397    std::unique_lock<std::mutex> lock(global_lock);
6398    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6399    if (pCB) {
6400        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6401        pCB->status |= CBSTATUS_SCISSOR_SET;
6402        pCB->scissors.resize(scissorCount);
6403        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6404    }
6405    lock.unlock();
6406    if (!skipCall)
6407        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6408}
6409
6410VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6411    bool skip_call = false;
6412    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6413    std::unique_lock<std::mutex> lock(global_lock);
6414    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6415    if (pCB) {
6416        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6417        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6418
6419        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6420        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6421            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6422                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6423                                 "vkCmdSetLineWidth() called but the bound pipeline was created without the "
6424                                 "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state. This is undefined behavior and the call may be ignored.");
6425        } else {
6426            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6427        }
6428    }
6429    lock.unlock();
6430    if (!skip_call)
6431        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6432}
6433
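/* Example sketch (not part of this layer): vkCmdSetLineWidth() is only
 * meaningful when the bound graphics pipeline lists
 * VK_DYNAMIC_STATE_LINE_WIDTH, per the warning above. The pipeline-creation
 * fragment below is illustrative; cb is a placeholder.
 *
 *     VkDynamicState dynamic_states[] = { VK_DYNAMIC_STATE_LINE_WIDTH };
 *     VkPipelineDynamicStateCreateInfo dyn_ci = {};
 *     dyn_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
 *     dyn_ci.dynamicStateCount = 1;
 *     dyn_ci.pDynamicStates = dynamic_states;
 *     // ... assign &dyn_ci to VkGraphicsPipelineCreateInfo::pDynamicState ...
 *     vkCmdSetLineWidth(cb, 2.0f); // legal once such a pipeline is bound
 */
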
6434VKAPI_ATTR void VKAPI_CALL
6435CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6436    bool skipCall = false;
6437    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6438    std::unique_lock<std::mutex> lock(global_lock);
6439    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6440    if (pCB) {
6441        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6442        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6443    }
6444    lock.unlock();
6445    if (!skipCall)
6446        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6447                                                         depthBiasSlopeFactor);
6448}
6449
6450VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6451    bool skipCall = false;
6452    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6453    std::unique_lock<std::mutex> lock(global_lock);
6454    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6455    if (pCB) {
6456        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6457        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6458    }
6459    lock.unlock();
6460    if (!skipCall)
6461        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6462}
6463
6464VKAPI_ATTR void VKAPI_CALL
6465CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6466    bool skipCall = false;
6467    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6468    std::unique_lock<std::mutex> lock(global_lock);
6469    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6470    if (pCB) {
6471        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6472        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6473    }
6474    lock.unlock();
6475    if (!skipCall)
6476        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6477}
6478
6479VKAPI_ATTR void VKAPI_CALL
6480CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6481    bool skipCall = false;
6482    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6483    std::unique_lock<std::mutex> lock(global_lock);
6484    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6485    if (pCB) {
6486        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6487        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6488    }
6489    lock.unlock();
6490    if (!skipCall)
6491        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6492}
6493
6494VKAPI_ATTR void VKAPI_CALL
6495CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6496    bool skipCall = false;
6497    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6498    std::unique_lock<std::mutex> lock(global_lock);
6499    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6500    if (pCB) {
6501        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6502        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6503    }
6504    lock.unlock();
6505    if (!skipCall)
6506        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6507}
6508
6509VKAPI_ATTR void VKAPI_CALL
6510CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6511    bool skipCall = false;
6512    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6513    std::unique_lock<std::mutex> lock(global_lock);
6514    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6515    if (pCB) {
6516        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6517        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6518    }
6519    lock.unlock();
6520    if (!skipCall)
6521        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6522}
6523
6524VKAPI_ATTR void VKAPI_CALL
6525CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6526                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6527                      const uint32_t *pDynamicOffsets) {
6528    bool skipCall = false;
6529    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6530    std::unique_lock<std::mutex> lock(global_lock);
6531    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6532    if (pCB) {
6533        if (pCB->state == CB_RECORDING) {
6534            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6535            uint32_t totalDynamicDescriptors = 0;
6536            string errorString = "";
6537            uint32_t lastSetIndex = firstSet + setCount - 1;
6538            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6539                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6540                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6541            }
6542            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6543            for (uint32_t i = 0; i < setCount; i++) {
6544                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6545                if (pSet) {
6546                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6547                    pSet->BindCommandBuffer(pCB);
6548                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6549                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6550                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6551                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6552                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6553                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6554                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6555                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6556                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6557                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6558                                            "DS 0x%" PRIxLEAST64
6559                                            " bound but it was never updated. You may want to either update it or not bind it.",
6560                                            (uint64_t)pDescriptorSets[i]);
6561                    }
6562                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6563                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6564                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6565                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6566                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6567                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6568                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6569                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6570                    }
6571
6572                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6573
6574                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6575
6576                    if (setDynamicDescriptorCount) {
6577                        // First make sure we won't overstep bounds of pDynamicOffsets array
6578                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6579                            skipCall |=
6580                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6581                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6582                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6583                                        "descriptorSet #%u (0x%" PRIxLEAST64
6584                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6585                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6586                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6587                                        (dynamicOffsetCount - totalDynamicDescriptors));
6588                        } else { // Validate and store dynamic offsets with the set
6589                            // Validate Dynamic Offset Minimums
6590                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6591                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6592                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6593                                    if (vk_safe_modulo(
6594                                            pDynamicOffsets[cur_dyn_offset],
6595                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6596                                        skipCall |= log_msg(
6597                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6598                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6599                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6600                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
6601                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6602                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6603                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6604                                    }
6605                                    cur_dyn_offset++;
6606                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6607                                    if (vk_safe_modulo(
6608                                            pDynamicOffsets[cur_dyn_offset],
6609                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6610                                        skipCall |= log_msg(
6611                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6612                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6613                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6614                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
6615                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6616                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6617                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6618                                    }
6619                                    cur_dyn_offset++;
6620                                }
6621                            }
6622
6623                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6624                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6625                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6626                            // Keep running total of dynamic descriptor count to verify at the end
6627                            totalDynamicDescriptors += setDynamicDescriptorCount;
6628
6629                        }
6630                    }
6631                } else {
6632                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6633                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6634                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6635                                        (uint64_t)pDescriptorSets[i]);
6636                }
6637                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6638                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6639                if (firstSet > 0) { // Check set #s below the first bound set
6640                    for (uint32_t i = 0; i < firstSet; ++i) {
6641                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6642                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6643                                                             layout, i, errorString)) {
6644                            skipCall |= log_msg(
6645                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6646                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6647                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6648                                "DescriptorSet 0x%" PRIxLEAST64
6649                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6650                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6651                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6652                        }
6653                    }
6654                }
6655                // Check if newly last bound set invalidates any remaining bound sets
6656                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6657                    if (oldFinalBoundSet &&
6658                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6659                        auto old_set = oldFinalBoundSet->GetSet();
6660                        skipCall |=
6661                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6662                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6663                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
6664                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6665                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6666                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6667                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6668                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6669                                    lastSetIndex + 1, (uint64_t)layout);
6670                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6671                    }
6672                }
6673            }
6674            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6675            if (totalDynamicDescriptors != dynamicOffsetCount) {
6676                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6677                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6678                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6679                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6680                                    "is %u. It should exactly match the number of dynamic descriptors.",
6681                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6682            }
6683        } else {
6684            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6685        }
6686    }
6687    lock.unlock();
6688    if (!skipCall)
6689        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6690                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6691}
6692
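/* Example sketch (not part of this layer): pDynamicOffsets must supply
 * exactly one offset per dynamic descriptor in the bound sets, each aligned
 * to the relevant device limit (e.g. minUniformBufferOffsetAlignment,
 * commonly 256). cb, pipe_layout, and set are placeholders; the set here is
 * assumed to hold two dynamic uniform-buffer descriptors.
 *
 *     uint32_t dynamic_offsets[] = { 0, 256 };
 *     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout,
 *                             0, 1, &set,           // firstSet, setCount, pSets
 *                             2, dynamic_offsets);  // dynamicOffsetCount, pOffsets
 */
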
6693VKAPI_ATTR void VKAPI_CALL
6694CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6695    bool skipCall = false;
6696    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6697    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6698    std::unique_lock<std::mutex> lock(global_lock);
6699    VkDeviceMemory mem;
6700    skipCall =
6701        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6702    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6703    if (cb_data != dev_data->commandBufferMap.end()) {
6704        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6705        cb_data->second->validate_functions.push_back(function);
6706        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6707        VkDeviceSize offset_align = 0;
6708        switch (indexType) {
6709        case VK_INDEX_TYPE_UINT16:
6710            offset_align = 2;
6711            break;
6712        case VK_INDEX_TYPE_UINT32:
6713            offset_align = 4;
6714            break;
6715        default:
6716            // ParamChecker should catch a bad enum; the alignment check below also fires if offset_align stays 0
6717            break;
6718        }
6719        if (!offset_align || (offset % offset_align)) {
6720            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6721                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6722                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
6723                                offset, string_VkIndexType(indexType));
6724        }
6725        cb_data->second->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6726    }
6727    lock.unlock();
6728    if (!skipCall)
6729        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6730}
6731
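/* Example sketch (not part of this layer): the alignment check above requires
 * index buffer offsets that are multiples of the index size (2 bytes for
 * UINT16, 4 bytes for UINT32). cb and index_buffer are placeholders.
 *
 *     vkCmdBindIndexBuffer(cb, index_buffer, 4, VK_INDEX_TYPE_UINT32); // OK
 *     vkCmdBindIndexBuffer(cb, index_buffer, 2, VK_INDEX_TYPE_UINT32); // flagged
 */
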
6732void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6733    uint32_t end = firstBinding + bindingCount;
6734    if (pCB->currentDrawData.buffers.size() < end) {
6735        pCB->currentDrawData.buffers.resize(end);
6736    }
6737    for (uint32_t i = 0; i < bindingCount; ++i) {
6738        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6739    }
6740}
6741
6742static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6743
6744VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6745                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6746                                                const VkDeviceSize *pOffsets) {
6747    bool skipCall = false;
6748    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6749    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6750    std::unique_lock<std::mutex> lock(global_lock);
6751    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6752    if (cb_data != dev_data->commandBufferMap.end()) {
6753        for (uint32_t i = 0; i < bindingCount; ++i) {
6754            VkDeviceMemory mem;
6755            skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6756
6757            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6758            cb_data->second->validate_functions.push_back(function);
6759        }
6760        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
6761        updateResourceTracking(cb_data->second, firstBinding, bindingCount, pBuffers);
6762    } else {
6763        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
6764    }
6765    lock.unlock();
6766    if (!skipCall)
6767        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6768}
6769
6770/* expects global_lock to be held by caller */
6771static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6772    bool skip_call = false;
6773
6774    for (auto imageView : pCB->updateImages) {
6775        auto iv_data = getImageViewData(dev_data, imageView);
6776        if (!iv_data)
6777            continue;
6778        VkImage image = iv_data->image;
6779        VkDeviceMemory mem;
6780        skip_call |=
6781            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
6782        std::function<bool()> function = [=]() {
6783            set_memory_valid(dev_data, mem, true, image);
6784            return false;
6785        };
6786        pCB->validate_functions.push_back(function);
6787    }
6788    for (auto buffer : pCB->updateBuffers) {
6789        VkDeviceMemory mem;
6790        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
6791                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6792        std::function<bool()> function = [=]() {
6793            set_memory_valid(dev_data, mem, true);
6794            return false;
6795        };
6796        pCB->validate_functions.push_back(function);
6797    }
6798    return skip_call;
6799}
6800
6801VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6802                                   uint32_t firstVertex, uint32_t firstInstance) {
6803    bool skipCall = false;
6804    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6805    std::unique_lock<std::mutex> lock(global_lock);
6806    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6807    if (pCB) {
6808        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6809        pCB->drawCount[DRAW]++;
6810        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6811        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6812        // TODO : Need to pass commandBuffer as srcObj here
6813        skipCall |=
6814            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6815                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6816        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6817        if (!skipCall) {
6818            updateResourceTrackingOnDraw(pCB);
6819        }
6820        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6821    }
6822    lock.unlock();
6823    if (!skipCall)
6824        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6825}
6826
6827VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6828                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6829                                          uint32_t firstInstance) {
6830    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6831    bool skipCall = false;
6832    std::unique_lock<std::mutex> lock(global_lock);
6833    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6834    if (pCB) {
6835        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6836        pCB->drawCount[DRAW_INDEXED]++;
6837        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6838        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6839        // TODO : Need to pass commandBuffer as srcObj here
6840        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6841                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6842                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6843        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6844        if (!skipCall) {
6845            updateResourceTrackingOnDraw(pCB);
6846        }
6847        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6848    }
6849    lock.unlock();
6850    if (!skipCall)
6851        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6852                                                        firstInstance);
6853}
6854
6855VKAPI_ATTR void VKAPI_CALL
6856CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6857    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6858    bool skipCall = false;
6859    std::unique_lock<std::mutex> lock(global_lock);
6860    VkDeviceMemory mem;
6861    // MTMTODO : merge with code below
6862    skipCall =
6863        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6864    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6865    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6866    if (pCB) {
6867        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6868        pCB->drawCount[DRAW_INDIRECT]++;
6869        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6870        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6871        // TODO : Need to pass commandBuffer as srcObj here
6872        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6873                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6874                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6875        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6876        if (!skipCall) {
6877            updateResourceTrackingOnDraw(pCB);
6878        }
6879        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6880    }
6881    lock.unlock();
6882    if (!skipCall)
6883        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6884}
6885
6886VKAPI_ATTR void VKAPI_CALL
6887CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6888    bool skipCall = false;
6889    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6890    std::unique_lock<std::mutex> lock(global_lock);
6891    VkDeviceMemory mem;
6892    // MTMTODO : merge with code below
6893    skipCall =
6894        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6895    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6896    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6897    if (pCB) {
6898        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6899        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6900        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6901        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6902        // TODO : Need to pass commandBuffer as srcObj here
6903        skipCall |=
6904            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6905                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6906                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6907        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6908        if (!skipCall) {
6909            updateResourceTrackingOnDraw(pCB);
6910        }
6911        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6912    }
6913    lock.unlock();
6914    if (!skipCall)
6915        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6916}
6917
6918VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6919    bool skipCall = false;
6920    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6921    std::unique_lock<std::mutex> lock(global_lock);
6922    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6923    if (pCB) {
6924        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6925        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6926        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6927        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6928    }
6929    lock.unlock();
6930    if (!skipCall)
6931        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
6932}
6933
6934VKAPI_ATTR void VKAPI_CALL
6935CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6936    bool skipCall = false;
6937    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6938    std::unique_lock<std::mutex> lock(global_lock);
6939    VkDeviceMemory mem;
6940    skipCall =
6941        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6942    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
6943    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6944    if (pCB) {
6945        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6946        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6947        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
6948        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
6949    }
6950    lock.unlock();
6951    if (!skipCall)
6952        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
6953}
6954
6955VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6956                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
6957    bool skipCall = false;
6958    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6959    std::unique_lock<std::mutex> lock(global_lock);
6960    VkDeviceMemory src_mem, dst_mem;
6961    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
6962    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
6963    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
6964
6965    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
6966    // Validate that SRC & DST buffers have correct usage flags set
6967    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
6968                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
6969    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
6970                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6971    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6972    if (cb_data != dev_data->commandBufferMap.end()) {
6973        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
6974        cb_data->second->validate_functions.push_back(function);
6975        function = [=]() {
6976            set_memory_valid(dev_data, dst_mem, true);
6977            return false;
6978        };
6979        cb_data->second->validate_functions.push_back(function);
6980
6981        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
6982        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBuffer");
6983    }
6984    lock.unlock();
6985    if (!skipCall)
6986        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
6987}
6988
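/* Example sketch (not part of this layer): the usage-flag checks above
 * require the transfer bits to be requested at buffer-creation time. device,
 * size, and src_buffer are placeholders.
 *
 *     VkBufferCreateInfo buf_ci = {};
 *     buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
 *     buf_ci.size = size;
 *     buf_ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;  // source of the copy
 *     buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
 *     vkCreateBuffer(device, &buf_ci, nullptr, &src_buffer);
 *     // The destination buffer likewise needs VK_BUFFER_USAGE_TRANSFER_DST_BIT.
 */
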
6989static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
6990                                    VkImageLayout srcImageLayout) {
6991    bool skip_call = false;
6992
6993    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
6994    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
6995    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
6996        uint32_t layer = i + subLayers.baseArrayLayer;
6997        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
6998        IMAGE_CMD_BUF_LAYOUT_NODE node;
6999        if (!FindLayout(pCB, srcImage, sub, node)) {
7000            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7001            continue;
7002        }
7003        if (node.layout != srcImageLayout) {
7004            // TODO: Improve log message in the next pass
7005            skip_call |=
7006                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7007                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7008                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image with the given source layout %s, "
7009                                                                        "which does not match its current layout %s.",
7010        }
7011    }
7012    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7013        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7014            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7015            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7016                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7017                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7018        } else {
7019            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7020                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7021                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7022                                 string_VkImageLayout(srcImageLayout));
7023        }
7024    }
7025    return skip_call;
7026}
7027
7028static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7029                                  VkImageLayout destImageLayout) {
7030    bool skip_call = false;
7031
7032    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7033    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7034    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7035        uint32_t layer = i + subLayers.baseArrayLayer;
7036        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7037        IMAGE_CMD_BUF_LAYOUT_NODE node;
7038        if (!FindLayout(pCB, destImage, sub, node)) {
7039            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7040            continue;
7041        }
7042        if (node.layout != destImageLayout) {
7043            skip_call |=
7044                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7045                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image with the given destination layout %s, "
7046                                                                        "which does not match its current layout %s.",
7047                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7048        }
7049    }
7050    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7051        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7052            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7053            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7054                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7055                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7056        } else {
7057            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7058                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7059                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7060                                 string_VkImageLayout(destImageLayout));
7061        }
7062    }
7063    return skip_call;
7064}
7065
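/* Example sketch (not part of this layer): to satisfy the layout checks
 * above, transition an image to TRANSFER_SRC_OPTIMAL / TRANSFER_DST_OPTIMAL
 * with a barrier before copying. The access masks and stages below are
 * typical choices, not requirements; cb and dst_image are placeholders.
 *
 *     VkImageMemoryBarrier barrier = {};
 *     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
 *     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
 *     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
 *     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
 *     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
 *     barrier.image = dst_image;
 *     barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
 *     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
 *     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
 *                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
 *                          0, nullptr, 0, nullptr, 1, &barrier);
 */
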
7066VKAPI_ATTR void VKAPI_CALL
7067CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7068             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7069    bool skipCall = false;
7070    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7071    std::unique_lock<std::mutex> lock(global_lock);
7072    VkDeviceMemory src_mem, dst_mem;
7073    // Validate that src & dst images have correct usage flags set
7074    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7075    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7076
7077    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7078    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7079    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7080                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7081    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7082                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7083    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7084    if (cb_data != dev_data->commandBufferMap.end()) {
7085        std::function<bool()> function = [=]() {
7086            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7087        };
7088        cb_data->second->validate_functions.push_back(function);
7089        function = [=]() {
7090            set_memory_valid(dev_data, dst_mem, true, dstImage);
7091            return false;
7092        };
7093        cb_data->second->validate_functions.push_back(function);
7094
7095        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGE, "vkCmdCopyImage()");
7096        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImage");
7097        for (uint32_t i = 0; i < regionCount; ++i) {
7098            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7099            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7100        }
7101    }
7102    lock.unlock();
7103    if (!skipCall)
7104        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7105                                                      regionCount, pRegions);
7106}
7107
7108VKAPI_ATTR void VKAPI_CALL
7109CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7110             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7111    bool skipCall = false;
7112    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7113    std::unique_lock<std::mutex> lock(global_lock);
7114    VkDeviceMemory src_mem, dst_mem;
7115    // Validate that src & dst images have correct usage flags set
7116    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7117    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7118
7119    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7120    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7121    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7122                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7123    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7124                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7125
7126    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7127    if (cb_data != dev_data->commandBufferMap.end()) {
7128        std::function<bool()> function = [=]() {
7129            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7130        };
7131        cb_data->second->validate_functions.push_back(function);
7132        function = [=]() {
7133            set_memory_valid(dev_data, dst_mem, true, dstImage);
7134            return false;
7135        };
7136        cb_data->second->validate_functions.push_back(function);
7137
7138        skipCall |= addCmd(dev_data, cb_data->second, CMD_BLITIMAGE, "vkCmdBlitImage()");
7139        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdBlitImage");
7140    }
7141    lock.unlock();
7142    if (!skipCall)
7143        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7144                                                      regionCount, pRegions, filter);
7145}
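// Example (app side, not layer code): a scaled blit sketch under the same assumptions as above ('cb',
// 'src', 'dst' are hypothetical); unlike copies, blits take explicit src/dst offset ranges and a filter:
//
//     VkImageBlit blit = {};
//     blit.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     blit.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     blit.srcOffsets[1] = {srcWidth, srcHeight, 1};   // placeholder extents
//     blit.dstOffsets[1] = {dstWidth, dstHeight, 1};
//     vkCmdBlitImage(cb, src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                    dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_LINEAR);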
7146
7147VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7148                                                VkImage dstImage, VkImageLayout dstImageLayout,
7149                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7150    bool skipCall = false;
7151    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7152    std::unique_lock<std::mutex> lock(global_lock);
7153    VkDeviceMemory dst_mem, src_mem;
7154    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7155    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7156
7157    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
7158    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7159    // Validate that src buff & dst image have correct usage flags set
7160    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7161                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7162    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7163                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7164    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7165    if (cb_data != dev_data->commandBufferMap.end()) {
7166        std::function<bool()> function = [=]() {
7167            set_memory_valid(dev_data, dst_mem, true, dstImage);
7168            return false;
7169        };
7170        cb_data->second->validate_functions.push_back(function);
7171        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7172        cb_data->second->validate_functions.push_back(function);
7173
7174        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7175        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBufferToImage");
7176        for (uint32_t i = 0; i < regionCount; ++i) {
7177            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7178        }
7179    }
7180    lock.unlock();
7181    if (!skipCall)
7182        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7183                                                              pRegions);
7184}
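// Example (app side, not layer code): a typical staging upload this routine validates, assuming a
// hypothetical 'staging' buffer created with VK_BUFFER_USAGE_TRANSFER_SRC_BIT and a 'tex' image with
// VK_IMAGE_USAGE_TRANSFER_DST_BIT, already in TRANSFER_DST_OPTIMAL:
//
//     VkBufferImageCopy copy = {};
//     copy.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     copy.imageExtent = {width, height, 1};  // bufferOffset/bufferRowLength left 0 => tightly packed
//     vkCmdCopyBufferToImage(cb, staging, tex, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy);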
7185
7186VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7187                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7188                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7189    bool skipCall = false;
7190    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7191    std::unique_lock<std::mutex> lock(global_lock);
7192    VkDeviceMemory src_mem, dst_mem;
7193    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7194    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7195
7196    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
7197    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7198    // Validate that dst buff & src image have correct usage flags set
7199    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7200                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7201    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7202                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7203
7204    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7205    if (cb_data != dev_data->commandBufferMap.end()) {
7206        std::function<bool()> function = [=]() {
7207            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7208        };
7209        cb_data->second->validate_functions.push_back(function);
7210        function = [=]() {
7211            set_memory_valid(dev_data, dst_mem, true);
7212            return false;
7213        };
7214        cb_data->second->validate_functions.push_back(function);
7215
7216        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7217        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImageToBuffer");
7218        for (uint32_t i = 0; i < regionCount; ++i) {
7219            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7220        }
7221    }
7222    lock.unlock();
7223    if (!skipCall)
7224        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7225                                                              pRegions);
7226}
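// Example (app side, not layer code): the readback direction, assuming a hypothetical 'tex' image in
// TRANSFER_SRC_OPTIMAL and a 'readback' buffer created with VK_BUFFER_USAGE_TRANSFER_DST_BIT:
//
//     VkBufferImageCopy copy = {};
//     copy.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     copy.imageExtent = {width, height, 1};
//     vkCmdCopyImageToBuffer(cb, tex, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, readback, 1, &copy);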
7227
7228VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7229                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7230    bool skipCall = false;
7231    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7232    std::unique_lock<std::mutex> lock(global_lock);
7233    VkDeviceMemory mem;
7234    skipCall =
7235        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7236    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7237    // Validate that dst buff has correct usage flags set
7238    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7239                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7240
7241    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7242    if (cb_data != dev_data->commandBufferMap.end()) {
7243        std::function<bool()> function = [=]() {
7244            set_memory_valid(dev_data, mem, true);
7245            return false;
7246        };
7247        cb_data->second->validate_functions.push_back(function);
7248
7249        skipCall |= addCmd(dev_data, cb_data->second, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7250        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdUpdateBuffer");
7251    }
7252    lock.unlock();
7253    if (!skipCall)
7254        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7255}
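// Example (app side, not layer code): an inline update of a small uniform buffer, assuming a hypothetical
// 'ubo' created with VK_BUFFER_USAGE_TRANSFER_DST_BIT; dataSize must be a multiple of 4 and small
// (at most 65536 bytes), which is why the data pointer is typed uint32_t* in this header revision:
//
//     const uint32_t constants[4] = {0, 1, 2, 3};
//     vkCmdUpdateBuffer(cb, ubo, 0 /*dstOffset*/, sizeof(constants), constants);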
7256
7257VKAPI_ATTR void VKAPI_CALL
7258CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7259    bool skipCall = false;
7260    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7261    std::unique_lock<std::mutex> lock(global_lock);
7262    VkDeviceMemory mem;
7263    skipCall =
7264        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7265    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7266    // Validate that dst buff has correct usage flags set
7267    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7268                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7269
7270    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7271    if (cb_data != dev_data->commandBufferMap.end()) {
7272        std::function<bool()> function = [=]() {
7273            set_memory_valid(dev_data, mem, true);
7274            return false;
7275        };
7276        cb_data->second->validate_functions.push_back(function);
7277
7278        skipCall |= addCmd(dev_data, cb_data->second, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7279        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdFillBuffer");
7280    }
7281    lock.unlock();
7282    if (!skipCall)
7283        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7284}
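// Example (app side, not layer code): zero-filling a whole buffer, assuming a hypothetical 'buf' created
// with VK_BUFFER_USAGE_TRANSFER_DST_BIT; VK_WHOLE_SIZE fills from dstOffset to the end of the buffer:
//
//     vkCmdFillBuffer(cb, buf, 0 /*dstOffset*/, VK_WHOLE_SIZE, 0 /*data*/);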
7285
7286VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7287                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7288                                               const VkClearRect *pRects) {
7289    bool skipCall = false;
7290    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7291    std::unique_lock<std::mutex> lock(global_lock);
7292    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7293    if (pCB) {
7294        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7295        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7296        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
7297            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7298            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7299            // There are times when an app needs ClearAttachments (generally when reusing an attachment inside a render pass),
7300            // so this is reported as a performance warning rather than an error.
7301            // TODO : Make this warning more specific so it is not triggered for uses that must call CmdClearAttachments.
7302            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7303                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7304                                DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7305                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7306                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7307                                (uint64_t)(commandBuffer));
7308        }
7309        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7310    }
7311
7312    // Validate that attachment is in reference list of active subpass
7313    if (pCB && pCB->activeRenderPass) {
7314        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7315        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7316
7317        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7318            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7319            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7320                bool found = false;
7321                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7322                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7323                        found = true;
7324                        break;
7325                    }
7326                }
7327                if (!found) {
7328                    skipCall |= log_msg(
7329                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7330                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7331                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7332                        attachment->colorAttachment, pCB->activeSubpass);
7333                }
7334            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7335                if (!pSD->pDepthStencilAttachment ||
7336                    (pSD->pDepthStencilAttachment->attachment ==
7337                     VK_ATTACHMENT_UNUSED)) { // No depth/stencil attachment will be used in the active subpass
7338
7339                    skipCall |= log_msg(
7340                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7341                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7342                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7343                        "in active subpass %d",
7344                        attachment->colorAttachment,
7345                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7346                        pCB->activeSubpass);
7347                }
7348            }
7349        }
7350    }
7351    lock.unlock();
7352    if (!skipCall)
7353        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7354}
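// Example (app side, not layer code): clearing color attachment 0 of the active subpass mid-render-pass,
// assuming a hypothetical render area of width x height; the colorAttachment index must appear in the
// subpass's pColorAttachments, which is exactly what the validation above checks:
//
//     VkClearAttachment att = {};
//     att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//     att.colorAttachment = 0;
//     att.clearValue.color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     VkClearRect rect = {{{0, 0}, {width, height}}, 0 /*baseArrayLayer*/, 1 /*layerCount*/};
//     vkCmdClearAttachments(cb, 1, &att, 1, &rect);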
7355
7356VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7357                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7358                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7359    bool skipCall = false;
7360    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7361    std::unique_lock<std::mutex> lock(global_lock);
7362    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7363    VkDeviceMemory mem;
7364    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7365    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7366    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7367    if (cb_data != dev_data->commandBufferMap.end()) {
7368        std::function<bool()> function = [=]() {
7369            set_memory_valid(dev_data, mem, true, image);
7370            return false;
7371        };
7372        cb_data->second->validate_functions.push_back(function);
7373
7374        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7375        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearColorImage");
7376    }
7377    lock.unlock();
7378    if (!skipCall)
7379        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7380}
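// Example (app side, not layer code): clearing every mip/layer of a color image outside a render pass,
// assuming a hypothetical 'img' already in TRANSFER_DST_OPTIMAL (GENERAL is also permitted):
//
//     VkClearColorValue color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS,
//                                      0, VK_REMAINING_ARRAY_LAYERS};
//     vkCmdClearColorImage(cb, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &color, 1, &range);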
7381
7382VKAPI_ATTR void VKAPI_CALL
7383CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7384                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7385                          const VkImageSubresourceRange *pRanges) {
7386    bool skipCall = false;
7387    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7388    std::unique_lock<std::mutex> lock(global_lock);
7389    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7390    VkDeviceMemory mem;
7391    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7392    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7393    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7394    if (cb_data != dev_data->commandBufferMap.end()) {
7395        std::function<bool()> function = [=]() {
7396            set_memory_valid(dev_data, mem, true, image);
7397            return false;
7398        };
7399        cb_data->second->validate_functions.push_back(function);
7400
7401        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7402        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearDepthStencilImage");
7403    }
7404    lock.unlock();
7405    if (!skipCall)
7406        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7407                                                                   pRanges);
7408}
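// Example (app side, not layer code): the depth/stencil analogue, assuming a hypothetical 'ds' image with
// a combined depth/stencil format in TRANSFER_DST_OPTIMAL:
//
//     VkClearDepthStencilValue dsValue = {1.0f /*depth*/, 0 /*stencil*/};
//     VkImageSubresourceRange range = {VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
//                                      0, 1, 0, 1};
//     vkCmdClearDepthStencilImage(cb, ds, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &dsValue, 1, &range);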
7409
7410VKAPI_ATTR void VKAPI_CALL
7411CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7412                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7413    bool skipCall = false;
7414    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7415    std::unique_lock<std::mutex> lock(global_lock);
7416    VkDeviceMemory src_mem, dst_mem;
7417    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7418    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7419
7420    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7421    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7422    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7423    if (cb_data != dev_data->commandBufferMap.end()) {
7424        std::function<bool()> function = [=]() {
7425            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7426        };
7427        cb_data->second->validate_functions.push_back(function);
7428        function = [=]() {
7429            set_memory_valid(dev_data, dst_mem, true, dstImage);
7430            return false;
7431        };
7432        cb_data->second->validate_functions.push_back(function);
7433
7434        skipCall |= addCmd(dev_data, cb_data->second, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7435        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdResolveImage");
7436    }
7437    lock.unlock();
7438    if (!skipCall)
7439        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7440                                                         regionCount, pRegions);
7441}
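// Example (app side, not layer code): resolving a multisampled color image to a single-sample image,
// assuming hypothetical 'msaa' (samples > 1) and 'resolved' (samples == 1) images in transfer layouts:
//
//     VkImageResolve res = {};
//     res.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     res.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     res.extent = {width, height, 1};
//     vkCmdResolveImage(cb, msaa, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                       resolved, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &res);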
7442
7443bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7444    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7445    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7446    if (pCB) {
7447        pCB->eventToStageMap[event] = stageMask;
7448    }
7449    auto queue_data = dev_data->queueMap.find(queue);
7450    if (queue_data != dev_data->queueMap.end()) {
7451        queue_data->second.eventToStageMap[event] = stageMask;
7452    }
7453    return false;
7454}
7455
7456VKAPI_ATTR void VKAPI_CALL
7457CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7458    bool skipCall = false;
7459    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7460    std::unique_lock<std::mutex> lock(global_lock);
7461    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7462    if (pCB) {
7463        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7464        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7465        pCB->events.push_back(event);
7466        if (!pCB->waitedEvents.count(event)) {
7467            pCB->writeEventsBeforeWait.push_back(event);
7468        }
7469        std::function<bool(VkQueue)> eventUpdate =
7470            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7471        pCB->eventUpdates.push_back(eventUpdate);
7472    }
7473    lock.unlock();
7474    if (!skipCall)
7475        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7476}
7477
7478VKAPI_ATTR void VKAPI_CALL
7479CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7480    bool skipCall = false;
7481    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7482    std::unique_lock<std::mutex> lock(global_lock);
7483    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7484    if (pCB) {
7485        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7486        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7487        pCB->events.push_back(event);
7488        if (!pCB->waitedEvents.count(event)) {
7489            pCB->writeEventsBeforeWait.push_back(event);
7490        }
7491        std::function<bool(VkQueue)> eventUpdate =
7492            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7493        pCB->eventUpdates.push_back(eventUpdate);
7494    }
7495    lock.unlock();
7496    if (!skipCall)
7497        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7498}
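// Example (app side, not layer code): the set/wait pairing these entry points track, assuming a
// hypothetical 'evt'; the srcStageMask passed to the later vkCmdWaitEvents must be the bitwise OR of the
// stageMasks used when setting (see validateEventStageMask later in this file):
//
//     vkCmdSetEvent(cb, evt, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     // ... other work ...
//     vkCmdWaitEvents(cb, 1, &evt, VK_PIPELINE_STAGE_TRANSFER_BIT /*srcStageMask*/,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*dstStageMask*/, 0, NULL, 0, NULL, 0, NULL);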
7499
7500static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7501                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7502    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7503    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7504    bool skip = false;
7505    uint32_t levelCount = 0;
7506    uint32_t layerCount = 0;
7507
7508    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7509        auto mem_barrier = &pImgMemBarriers[i];
7510        if (!mem_barrier)
7511            continue;
7512        // TODO: Do not iterate over every possibility - consolidate where
7513        // possible
7514        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7515
7516        for (uint32_t j = 0; j < levelCount; j++) {
7517            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7518            for (uint32_t k = 0; k < layerCount; k++) {
7519                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7520                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7521                IMAGE_CMD_BUF_LAYOUT_NODE node;
7522                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7523                    SetLayout(pCB, mem_barrier->image, sub,
7524                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7525                    continue;
7526                }
7527                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7528                    // TODO: Set memory invalid which is in mem_tracker currently
7529                } else if (node.layout != mem_barrier->oldLayout) {
7530                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7531                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7532                                                                                    "when current layout is %s.",
7533                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7534                }
7535                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7536            }
7537        }
7538    }
7539    return skip;
7540}
7541
7542// Print readable FlagBits in FlagMask
7543static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7544    std::string result;
7545    std::string separator;
7546
7547    if (accessMask == 0) {
7548        result = "[None]";
7549    } else {
7550        result = "[";
7551        for (auto i = 0; i < 32; i++) {
7552            if (accessMask & (1u << i)) {
7553                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
7554                separator = " | ";
7555            }
7556        }
7557        result = result + "]";
7558    }
7559    return result;
7560}
7561
7562// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7563// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7564// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
7565static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7566                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7567                             const char *type) {
7568    bool skip_call = false;
7569
7570    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7571        if (accessMask & ~(required_bit | optional_bits)) {
7572            // TODO: Verify against Valid Use
7573            skip_call |=
7574                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7575                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7576                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7577        }
7578    } else {
7579        if (!required_bit) {
7580            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7581                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7582                                                                  "%s when layout is %s, unless the app has previously added a "
7583                                                                  "barrier for this transition.",
7584                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7585                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7586        } else {
7587            std::string opt_bits;
7588            if (optional_bits != 0) {
7589                std::stringstream ss;
7590                ss << optional_bits;
7591                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7592            }
7593            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7594                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7595                                                                  "layout is %s, unless the app has previously added a barrier for "
7596                                                                  "this transition.",
7597                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7598                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7599        }
7600    }
7601    return skip_call;
7602}
7603
7604static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7605                                        const VkImageLayout &layout, const char *type) {
7606    bool skip_call = false;
7607    switch (layout) {
7608    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7609        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7610                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7611        break;
7612    }
7613    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7614        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7615                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7616        break;
7617    }
7618    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7619        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7620        break;
7621    }
7622    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7623        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7624        break;
7625    }
7626    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7627        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7628                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7629        break;
7630    }
7631    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7632        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7633                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7634        break;
7635    }
7636    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7637        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7638        break;
7639    }
7640    case VK_IMAGE_LAYOUT_UNDEFINED: {
7641        if (accessMask != 0) {
7642            // TODO: Verify against Valid Use section spec
7643            skip_call |=
7644                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7645                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7646                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7647        }
7648        break;
7649    }
7650    case VK_IMAGE_LAYOUT_GENERAL:
7651    default: { break; }
7652    }
7653    return skip_call;
7654}
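// Example (app side, not layer code): an image barrier whose access masks satisfy the checks above for a
// PREINITIALIZED -> TRANSFER_DST_OPTIMAL transition, assuming a hypothetical 'img':
//
//     VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;      // required for a PREINITIALIZED source
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // required for a TRANSFER_DST_OPTIMAL dest
//     barrier.oldLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = img;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};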
7655
7656static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7657                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7658                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7659                             const VkImageMemoryBarrier *pImageMemBarriers) {
7660    bool skip_call = false;
7661    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7662    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7663    if (pCB->activeRenderPass && memBarrierCount) {
7664        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7665            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7666                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7667                                                                  "with no self dependency specified.",
7668                                 funcName, pCB->activeSubpass);
7669        }
7670    }
7671    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7672        auto mem_barrier = &pImageMemBarriers[i];
7673        auto image_data = getImageNode(dev_data, mem_barrier->image);
7674        if (image_data) {
7675            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7676            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7677            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7678                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7679                // be VK_QUEUE_FAMILY_IGNORED
7680                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7681                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7682                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7683                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
7684                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
7685                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7686                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7687                }
7688            } else {
7689                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7690                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7691                // or both be a valid queue family
7692                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7693                    (src_q_f_index != dst_q_f_index)) {
7694                    skip_call |=
7695                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7696                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7697                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7698                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7699                                                                     "must be.",
7700                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7701                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7702                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7703                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7704                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7705                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7706                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7707                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7708                                         " or dstQueueFamilyIndex %d is greater than or equal to the " PRINTF_SIZE_T_SPECIFIER
7709                                         " queueFamilies created for this device.",
7710                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7711                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7712                }
7713            }
7714        }
7715
7716        if (mem_barrier) {
7717            skip_call |=
7718                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7719            skip_call |=
7720                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7721            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7722                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7723                        __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7724                        "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
7726            }
7727            auto image_data = getImageNode(dev_data, mem_barrier->image);
7728            VkFormat format = VK_FORMAT_UNDEFINED;
7729            uint32_t arrayLayers = 0, mipLevels = 0;
7730            bool imageFound = false;
7731            if (image_data) {
7732                format = image_data->createInfo.format;
7733                arrayLayers = image_data->createInfo.arrayLayers;
7734                mipLevels = image_data->createInfo.mipLevels;
7735                imageFound = true;
7736            } else if (dev_data->device_extensions.wsi_enabled) {
7737                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
7738                if (imageswap_data) {
7739                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
7740                    if (swapchain_data) {
7741                        format = swapchain_data->createInfo.imageFormat;
7742                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
7743                        mipLevels = 1;
7744                        imageFound = true;
7745                    }
7746                }
7747            }
7748            if (imageFound) {
7749                if (vk_format_is_depth_and_stencil(format) &&
7750                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7751                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
7752                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7753                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7754                            "%s: Image is a depth and stencil format and thus must have both "
7755                            "VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
7756                            funcName);
7757                }
7758                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7759                                     ? 1
7760                                     : mem_barrier->subresourceRange.layerCount;
7761                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
7762                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7763                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7764                            "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be less "
7765                            "than or equal to the total number of layers (%d).",
7766                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
7767                            arrayLayers);
7768                }
7769                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7770                                     ? 1
7771                                     : mem_barrier->subresourceRange.levelCount;
7772                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
7773                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7774                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7775                            "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be less "
7776                            "than or equal to the total number of levels (%d).",
7777                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
7778                            mipLevels);
7779                }
7780            }
7781        }
7782    }
7783    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7784        auto mem_barrier = &pBufferMemBarriers[i];
7785        if (pCB->activeRenderPass) {
7786            skip_call |=
7787                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7788                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7789        }
7790        if (!mem_barrier)
7791            continue;
7792
7793        // Validate buffer barrier queue family indices
7794        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7795             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7796            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7797             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7798            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7799                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7800                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7801                                 "%s: Buffer Barrier 0x%" PRIx64 " has a QueueFamilyIndex greater than or equal to "
7802                                 "the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7803                                 dev_data->phys_dev_properties.queue_family_properties.size());
7804        }
7805
7806        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
7807        if (buffer_node) {
7808            VkDeviceSize buffer_size =
7809                (buffer_node->createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) ? buffer_node->createInfo.size : 0;
7810            if (mem_barrier->offset >= buffer_size) {
7811                skip_call |= log_msg(
7812                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7813                    DRAWSTATE_INVALID_BARRIER, "DS",
7814                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7815                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7816                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7817            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7818                skip_call |= log_msg(
7819                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7820                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7821                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7822                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7823                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7824                    reinterpret_cast<const uint64_t &>(buffer_size));
7825            }
7826        }
7827    }
7828    return skip_call;
7829}
7830
7831bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7832    bool skip_call = false;
7833    VkPipelineStageFlags stageMask = 0;
7834    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7835    for (uint32_t i = 0; i < eventCount; ++i) {
7836        auto event = pCB->events[firstEventIndex + i];
7837        auto queue_data = dev_data->queueMap.find(queue);
7838        if (queue_data == dev_data->queueMap.end())
7839            return false;
7840        auto event_data = queue_data->second.eventToStageMap.find(event);
7841        if (event_data != queue_data->second.eventToStageMap.end()) {
7842            stageMask |= event_data->second;
7843        } else {
7844            auto global_event_data = dev_data->eventMap.find(event);
7845            if (global_event_data == dev_data->eventMap.end()) {
7846                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7847                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7848                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7849                                     reinterpret_cast<const uint64_t &>(event));
7850            } else {
7851                stageMask |= global_event_data->second.stageMask;
7852            }
7853        }
7854    }
7855    // TODO: Need to validate that host_bit is only set if set event is called
7856    // but set event can be called at any time.
7857    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7858        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7859                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
7860                                                            "using srcStageMask 0x%X, which must be the bitwise "
7861                                                            "OR of the stageMask parameters used in calls to "
7862                                                            "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
7863                                                            "the event was set with vkSetEvent), but instead is 0x%X.",
7864                             sourceStageMask, stageMask);
7865    }
7866    return skip_call;
7867}
7868
7869VKAPI_ATTR void VKAPI_CALL
7870CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7871              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7872              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7873              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7874    bool skipCall = false;
7875    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7876    std::unique_lock<std::mutex> lock(global_lock);
7877    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7878    if (pCB) {
7879        auto firstEventIndex = pCB->events.size();
7880        for (uint32_t i = 0; i < eventCount; ++i) {
7881            pCB->waitedEvents.insert(pEvents[i]);
7882            pCB->events.push_back(pEvents[i]);
7883        }
7884        std::function<bool(VkQueue)> eventUpdate =
7885            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7886        pCB->eventUpdates.push_back(eventUpdate);
7887        if (pCB->state == CB_RECORDING) {
7888            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7889        } else {
7890            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7891        }
7892        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7893        skipCall |=
7894            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7895                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7896    }
7897    lock.unlock();
7898    if (!skipCall)
7899        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7900                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7901                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7902}
7903
7904VKAPI_ATTR void VKAPI_CALL
7905CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7906                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7907                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7908                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7909    bool skipCall = false;
7910    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7911    std::unique_lock<std::mutex> lock(global_lock);
7912    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7913    if (pCB) {
7914        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7915        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7916        skipCall |=
7917            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7918                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7919    }
7920    lock.unlock();
7921    if (!skipCall)
7922        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7923                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7924                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7925}
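// Example (app side, not layer code): submitting the image barrier sketched earlier (above
// ValidateBarriers) via a pipeline barrier, ordering host writes against subsequent transfer writes:
//
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0 /*dependencyFlags*/, 0, NULL, 0, NULL, 1, &barrier);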
7926
7927bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
7928    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7929    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7930    if (pCB) {
7931        pCB->queryToStateMap[object] = value;
7932    }
7933    auto queue_data = dev_data->queueMap.find(queue);
7934    if (queue_data != dev_data->queueMap.end()) {
7935        queue_data->second.queryToStateMap[object] = value;
7936    }
7937    return false;
7938}
7939
7940VKAPI_ATTR void VKAPI_CALL
7941CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
7942    bool skipCall = false;
7943    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7944    std::unique_lock<std::mutex> lock(global_lock);
7945    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7946    if (pCB) {
7947        QueryObject query = {queryPool, slot};
7948        pCB->activeQueries.insert(query);
7949        if (!pCB->startedQueries.count(query)) {
7950            pCB->startedQueries.insert(query);
7951        }
7952        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
7953    }
7954    lock.unlock();
7955    if (!skipCall)
7956        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
7957}
7958
7959VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
7960    bool skipCall = false;
7961    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7962    std::unique_lock<std::mutex> lock(global_lock);
7963    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7964    if (pCB) {
7965        QueryObject query = {queryPool, slot};
7966        if (!pCB->activeQueries.count(query)) {
7967            skipCall |=
7968                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7969                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
7970                        (uint64_t)(queryPool), slot);
7971        } else {
7972            pCB->activeQueries.erase(query);
7973        }
7974        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
7975        pCB->queryUpdates.push_back(queryUpdate);
7976        if (pCB->state == CB_RECORDING) {
7977            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
7978        } else {
7979            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
7980        }
7981    }
7982    lock.unlock();
7983    if (!skipCall)
7984        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
7985}
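// Example (app side, not layer code): the begin/end pairing tracked above, assuming a hypothetical
// occlusion-query 'pool' that was reset before recording began:
//
//     vkCmdBeginQuery(cb, pool, 0 /*slot*/, 0 /*flags*/);
//     // ... draws whose samples should be counted ...
//     vkCmdEndQuery(cb, pool, 0);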
7986
7987VKAPI_ATTR void VKAPI_CALL
7988CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
7989    bool skipCall = false;
7990    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7991    std::unique_lock<std::mutex> lock(global_lock);
7992    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7993    if (pCB) {
7994        for (uint32_t i = 0; i < queryCount; i++) {
7995            QueryObject query = {queryPool, firstQuery + i};
7996            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
7997            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
7998            pCB->queryUpdates.push_back(queryUpdate);
7999        }
8000        if (pCB->state == CB_RECORDING) {
8001            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8002        } else {
8003            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8004        }
8005        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8006    }
8007    lock.unlock();
8008    if (!skipCall)
8009        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8010}
8011
bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end())
        return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_data = queue_data->second.queryToStateMap.find(query);
        bool fail = false;
        if (query_data != queue_data->second.queryToStateMap.end()) {
            if (!query_data->second) {
                fail = true;
            }
        } else {
            auto global_query_data = dev_data->queryToStateMap.find(query);
            if (global_query_data != dev_data->queryToStateMap.end()) {
                if (!global_query_data->second) {
                    fail = true;
                }
            } else {
                fail = true;
            }
        }
        if (fail) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
    // Validate that DST buffer has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    if (pCB) {
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, pCB, queryPool, queryCount, firstQuery);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    if (!pipeline_layout) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
                            (uint64_t)layout);
    } else {
        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
        // contained in the pipeline ranges.
        // Build a {start, end} span list for ranges with matching stage flags.
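        // e.g. matching ranges [0, 16) and [8, 32) coalesce into the single span [0, 32),
        // so an update covering [4, 24) is accepted even though neither range alone contains it.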
        const auto &ranges = pipeline_layout->pushConstantRanges;
        struct span {
            uint32_t start;
            uint32_t end;
        };
        std::vector<span> spans;
        spans.reserve(ranges.size());
        for (const auto &iter : ranges) {
            if (iter.stageFlags == stageFlags) {
                spans.push_back({iter.offset, iter.offset + iter.size});
            }
        }
        if (spans.size() == 0) {
            // There were no ranges that matched the stageFlags.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
                                (uint32_t)stageFlags, (uint64_t)layout);
        } else {
            // Sort span list by start value.
            struct comparer {
                bool operator()(struct span i, struct span j) { return i.start < j.start; }
            } my_comparer;
            std::sort(spans.begin(), spans.end(), my_comparer);

            // Examine two spans at a time.
            std::vector<span>::iterator current = spans.begin();
            std::vector<span>::iterator next = current + 1;
            while (next != spans.end()) {
                if (current->end < next->start) {
                    // There is a gap; cannot coalesce. Move to the next two spans.
                    ++current;
                    ++next;
                } else {
                    // Coalesce the two spans.  The start of the next span
                    // is within the current span, so pick the larger of
                    // the end values to extend the current span.
                    // Then delete the next span and set next to the span after it.
                    current->end = max(current->end, next->end);
                    next = spans.erase(next);
                }
            }

            // Now we can check if the incoming range is within any of the spans.
            bool contained_in_a_range = false;
            for (uint32_t i = 0; i < spans.size(); ++i) {
                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
                    contained_in_a_range = true;
                    break;
                }
            }
            if (!contained_in_a_range) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                    "vkCmdPushConstants() Push constant range [%d, %d) "
                                    "with stageFlags = 0x%" PRIx32 " "
                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
            }
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

VKAPI_ATTR void VKAPI_CALL
CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

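// Verify that each attachment reference in 'attachments' that maps to a framebuffer attachment
// was created from an image whose usage flags include 'usage_flag'.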
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
    bool skip_call = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, *image_view);
                if (ivci != nullptr) {
                    // Check the image node before dereferencing it to read its create info
                    auto image_node = getImageNode(dev_data, ivci->image);
                    if (image_node != nullptr) {
                        const VkImageCreateInfo *ici = &image_node->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip_call;
}

static bool ValidateAttachmentImageUsage(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip_call = false;

    // Check the render pass node before dereferencing it to read its create info
    auto render_pass = getRenderPass(dev_data, pCreateInfo->renderPass);
    if (render_pass != nullptr) {
        const VkRenderPassCreateInfo *rpci = render_pass->pCreateInfo;
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
            // Verify color attachments:
            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
            }
        }
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFramebuffer *pFramebuffer) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    // TODO : Verify that the renderPass this FB is created with is compatible with the FB
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    skip_call |= ValidateAttachmentImageUsage(dev_data, pCreateInfo);
    if (skip_call == false) {
        result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    }
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        std::lock_guard<std::mutex> lock(global_lock);

        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
        fbNode.createInfo = *pCreateInfo;
        if (pCreateInfo->pAttachments) {
            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
            memcpy(attachments, pCreateInfo->pAttachments, pCreateInfo->attachmentCount * sizeof(VkImageView));
            fbNode.createInfo.pAttachments = attachments;
        }
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = getImageViewData(dev_data, view);
            if (!view_data) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->image;
            fbNode.attachments.push_back(fb_info);
        }
    }
    return result;
}

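// Search the subpass DAG backwards from 'index' for 'dependent'; returns true if a dependency
// path between the two subpasses exists.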
static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}

static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit one still might. If not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as later nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

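// Half-open range overlap test: [offset1, offset1 + size1) and [offset2, offset2 + size2)
// overlap iff each range starts before the other one ends,
// e.g. (0, 16, 8, 32) and (0, 32, 8, 8) both overlap, while (0, 8, 8, 8) does not.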
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

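// Validate that subpasses of this renderpass that touch the same attachment memory, either
// directly or through aliased views/bindings, have dependencies between them, and that any
// aliasing attachments set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.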
static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
                                 RENDER_PASS_NODE const * renderPass) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = getImageViewData(my_data, viewi);
            auto view_data_j = getImageViewData(my_data, viewj);
            if (!view_data_i || !view_data_j) {
                continue;
            }
            if (view_data_i->image == view_data_j->image &&
                isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = getImageNode(my_data, view_data_i->image);
            auto image_data_j = getImageNode(my_data, view_data_j->image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
                                                                             image_data_j->memOffset, image_data_j->memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            // Skip unused references so VK_ATTACHMENT_UNUSED is never used as a vector index
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
                            attachment, i);
            }
        }
    }
    // If a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved
    // for all passes after the one that wrote it.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
// VkAttachmentDescription structs used by the subpasses of a renderpass. The initial check ensures that
// READ_ONLY layout attachments don't have CLEAR as their loadOp.
static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
                                                  const uint32_t attachment,
                                                  const VkAttachmentDescription &attachment_description) {
    bool skip_call = false;
    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
            skip_call |=
                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
        }
    }
    return skip_call;
}

static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
                }
            }
            auto attach_index = subpass.pInputAttachments[j].attachment;
            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
                                                          pCreateInfo->pAttachments[attach_index]);
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
                }
            }
            auto attach_index = subpass.pColorAttachments[j].attachment;
            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
                                                          pCreateInfo->pAttachments[attach_index]);
        }
        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
                }
            }
            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
        }
    }
    return skip;
}

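// Build the subpass DAG from pCreateInfo->pDependencies: each dependency adds a prev edge on the
// destination subpass and a next edge on the source subpass, and self-dependencies are recorded
// in has_self_dependency.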
static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }
        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}


VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkShaderModule *pShaderModule) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    /* Use the SPIRV-Tools validator to try to catch any issues with the module itself */
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
    spv_diagnostic diag = nullptr;

    auto result = spvValidate(ctx, &binary, &diag);
    if (result != SPV_SUCCESS) {
        skip_call |= log_msg(my_data->report_data,
                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VkDebugReportObjectTypeEXT(0), 0,
                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
                             diag && diag->error ? diag->error : "(no error text)");
    }

    spvDiagnosticDestroy(diag);
    spvContextDestroy(ctx);

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip_call = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
                             type, attachment, attachment_count);
    }
    return skip_call;
}

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
        }
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
            } else {
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);

    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        // TODOSC : Merge in tracking of renderpass from shader_checker
        // Shadow create info and store in map
        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
        }
        if (pCreateInfo->pSubpasses) {
            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));

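            // All attachment references for a subpass are shadowed in a single contiguous
            // allocation; each p*Attachments pointer below is re-aimed at its slice of that block.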
            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
                const uint32_t attachmentCount = subpass->inputAttachmentCount +
                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];

                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
                subpass->pInputAttachments = attachments;
                attachments += subpass->inputAttachmentCount;

                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                subpass->pColorAttachments = attachments;
                attachments += subpass->colorAttachmentCount;

                if (subpass->pResolveAttachments) {
                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                    subpass->pResolveAttachments = attachments;
                    attachments += subpass->colorAttachmentCount;
                }

                if (subpass->pDepthStencilAttachment) {
                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
                    subpass->pDepthStencilAttachment = attachments;
                    attachments += 1;
                }

                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference
                // structs, so copy only preserveAttachmentCount * sizeof(uint32_t) bytes to avoid
                // overreading the source array.
                uint32_t *preserved = reinterpret_cast<uint32_t *>(attachments);
                memcpy(preserved, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
                subpass->pPreserveAttachments = preserved;
            }
        }
        if (pCreateInfo->pDependencies) {
            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
        }

        auto render_pass = new RENDER_PASS_NODE(localRPCI);
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;
#if MTMERGESOURCE
        // MTMTODO : Merge with code from above to eliminate duplication
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
            MT_PASS_ATTACHMENT_INFO pass_info;
            pass_info.load_op = desc.loadOp;
            pass_info.store_op = desc.storeOp;
            pass_info.stencil_load_op = desc.stencilLoadOp;
            pass_info.stencil_store_op = desc.stencilStoreOp;
            pass_info.attachment = i;
            render_pass->attachments.push_back(pass_info);
        }
        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }
#endif
        dev_data->renderPassMap[*pRenderPass] = render_pass;
    }
    return result;
}

// Free the renderpass shadow
static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in one block, so we just need to
                //  find the first non-null pointer to delete
                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
                }
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
        delete[] pRenderPassInfo->pDependencies;
        delete pRenderPassInfo;
        delete (*ii).second;
    }
    my_data->renderPassMap.clear();
}

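// Check that the tracked layout of each framebuffer attachment matches the initialLayout the
// render pass declares for it, seeding the layout tracking for any subresource this command
// buffer has not seen yet.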
static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = getImageViewData(dev_data, image_view);
        assert(image_data);
        const VkImage &image = image_data->image;
        const VkImageSubresourceRange &subRange = image_data->subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
                                                                    "where the "
                                                                    "initial layout is %s and the layout of the attachment at the "
                                                                    "start of the render pass is %s. The layouts must match.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

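// Update the tracked layouts of the attachments used by the given subpass to the layouts that
// the subpass description declares for them.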
static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    const VkFramebufferCreateInfo &framebufferInfo = framebuffer->createInfo;
    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        // Skip unused references so VK_ATTACHMENT_UNUSED is never used to index pAttachments
        if (subpass.pInputAttachments[j].attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}
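
// Illustrative sketch (not part of the layer): application-side values that would trip
// VerifyRenderAreaBounds() above, assuming a hypothetical 800x600 framebuffer:
//
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//     rp_begin.renderArea.offset = {100, 100};
//     rp_begin.renderArea.extent = {800, 600};   // 100 + 800 > 800 -> DRAWSTATE_INVALID_RENDER_AREA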

// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
    // For now, having either the color/depth op OR the stencil op will make the memory valid. They may need to be tracked separately.
    bool failed = ((check_stencil_load_op && (stencil_op != op)) && (check_color_depth_load_op && (color_depth_op != op)));
    return !failed;
}
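
// Illustrative sketch (not part of the layer): how the helper above evaluates. It only "fails"
// (returns false) when every applicable op differs from 'op'; matching either the color/depth op
// or the stencil op is sufficient. E.g. for a combined depth/stencil format:
//
//     FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_D24_UNORM_S8_UINT,
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR,       // depth loadOp
//                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,   // stencilLoadOp
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR);      // true: the depth op matches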

VKAPI_ATTR void VKAPI_CALL
CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (pCB) {
        if (renderPass) {
            uint32_t clear_op_count = 0;
            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
                                                         renderPass->attachments[i].stencil_load_op,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    ++clear_op_count;
                    std::function<bool()> function = [=]() {
                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
                                                                renderPass->attachments[i].stencil_load_op,
                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
                                                                renderPass->attachments[i].stencil_load_op,
                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                    };
                    pCB->validate_functions.push_back(function);
                }
                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
                    std::function<bool()> function = [=]() {
                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
            if (clear_op_count > pRenderPassBegin->clearValueCount) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but the actual number "
                    "of attachments in renderPass 0x%" PRIx64 " that use VK_ATTACHMENT_LOAD_OP_CLEAR is %u. The clearValueCount "
                    "must therefore be greater than or equal to %u.",
                    pRenderPassBegin->clearValueCount, reinterpret_cast<uint64_t &>(renderPass), clear_op_count, clear_op_count);
            }
            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skipCall |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= ValidateDependencies(dev_data, framebuffer, renderPass);
            pCB->activeRenderPass = renderPass;
            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            // This is a shallow copy as that is all that is needed for now
            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
            pCB->activeSubpass = 0;
            pCB->activeSubpassContents = contents;
            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer to this cmdBuffer
            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
        } else {
            skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    lock.unlock();
    if (!skipCall) {
        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}
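
// Illustrative sketch (not part of the layer): an application-side begin that satisfies the
// clearValueCount check above, assuming hypothetical render_pass/framebuffer handles whose
// render pass has two attachments using VK_ATTACHMENT_LOAD_OP_CLEAR:
//
//     VkClearValue clears[2] = {};                 // one entry per CLEAR attachment
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//     rp_begin.renderPass = render_pass;
//     rp_begin.framebuffer = framebuffer;
//     rp_begin.clearValueCount = 2;                // any value < 2 triggers DRAWSTATE_RENDERPASS_INCOMPATIBLE
//     rp_begin.pClearValues = clears;
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);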

VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
        if (pRPNode) {
            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
                                                                pRPNode->attachments[i].stencil_store_op,
                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
                                        const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                   " that is not compatible with the current render pass 0x%" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
                   msg);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The second is unused while the first is not.");
        return skip_call;
    }
    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different formats.");
    }
    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different samples.");
    }
    if (is_multi &&
        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different flags.");
    }
    return skip_call;
}

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
                                         bool is_multi) {
    bool skip_call = false;
    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}
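
// Illustrative sketch (not part of the layer): two render passes are attachment-compatible
// when corresponding attachments agree on format and sample count (and, for multi-subpass
// passes, flags); load/store ops and layouts are allowed to differ. For example:
//
//     VkAttachmentDescription a = {};
//     a.format = VK_FORMAT_B8G8R8A8_UNORM;
//     a.samples = VK_SAMPLE_COUNT_1_BIT;
//     a.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     VkAttachmentDescription b = a;
//     b.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;     // loadOp differs, but format/samples match -> compatible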

static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_render_pass = getRenderPass(dev_data, primaryPass);
    auto secondary_render_pass = getRenderPass(dev_data, secondaryPass);
    if (!primary_render_pass) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (!secondary_render_pass) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_render_pass->pCreateInfo->subpassCount != secondary_render_pass->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                             " that is not compatible with the current render pass 0x%" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    auto subpassCount = primary_render_pass->pCreateInfo->subpassCount;
    for (uint32_t i = 0; i < subpassCount; ++i) {
        skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primary_render_pass, secondaryBuffer,
                                                  secondary_render_pass, i, subpassCount > 1);
    }
    return skip_call;
}

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb = getFramebuffer(dev_data, secondary_fb);
        if (!fb) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried, so the command "
                        "buffer must have all bits set on the queryPool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
                        "secondary Cmd Buffer 0x%p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}
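
// Illustrative sketch (not part of the layer): while a pipeline-statistics query is active in
// the primary command buffer, every statistics bit enabled in the secondary's inheritance info
// must also have been enabled on the active query pool. Hypothetical application-side setup:
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
//     // Valid only if the pool's VkQueryPoolCreateInfo::pipelineStatistics includes
//     // CLIPPING_INVOCATIONS_BIT; otherwise the check above fires.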

VKAPI_ATTR void VKAPI_CALL
CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // A secondary CB executed within a render pass must have the *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
                }
                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn: a secondary cmd buffer without SIMULTANEOUS_USE forces the primary to be treated as non-simultaneous
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight when inherited queries are not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
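
// Illustrative sketch (not part of the layer): recording a secondary command buffer so that it
// passes the checks above when executed inside a render pass. All handles are hypothetical.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;       // must be compatible with the primary's active pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer;      // optional; if set, must match the active framebuffer
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);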

static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_info = getMemObjInfo(dev_data, mem);
    if ((mem_info) && (mem_info->image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_info->image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
                                                                                         "GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}
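
// Illustrative sketch (not part of the layer): memory that backs an image may only be mapped
// while every tracked subresource is in GENERAL or PREINITIALIZED layout. A hypothetical
// host-writable linear image would typically be set up for this pattern as:
//
//     VkImageCreateInfo ici = {};
//     ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
//     ici.tiling = VK_IMAGE_TILING_LINEAR;
//     ici.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;   // mappable per the check above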

VKAPI_ATTR VkResult VKAPI_CALL
MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
    if (pMemObj) {
        pMemObj->valid = true;
        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
    }
    skip_call |= validateMemRange(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    lock.unlock();

    if (!skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
#if MTMERGESOURCE
            lock.lock();
            storeMemRanges(dev_data, mem, offset, size);
            initializeAndTrackMemory(dev_data, mem, size, ppData);
            lock.unlock();
#endif
        }
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= deleteMemRanges(my_data, mem);
    lock.unlock();
    if (!skipCall) {
        my_data->device_dispatch_table->UnmapMemory(device, mem);
    }
}

static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skipCall = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
        if (mem_info) {
            if (mem_info->memRange.offset > pMemRanges[i].offset) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
                            "(" PRINTF_SIZE_T_SPECIFIER ").",
                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->memRange.offset));
            }

            const uint64_t my_dataTerminus =
                    (mem_info->memRange.size == VK_WHOLE_SIZE) ? mem_info->allocInfo.allocationSize :
                                                                           (mem_info->memRange.offset + mem_info->memRange.size);
            if (pMemRanges[i].size != VK_WHOLE_SIZE && (my_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
                                                                 ") exceeds the Memory Object's upper-bound "
                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                    static_cast<size_t>(my_dataTerminus));
            }
        }
    }
    return skipCall;
}
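
// Illustrative sketch (not part of the layer): a flush/invalidate range must lie inside the
// currently mapped range of its memory object. Hypothetical application-side usage:
//
//     vkMapMemory(device, mem, 256, 1024, 0, &ptr);   // mapped range is [256, 1280)
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = 256;
//     range.size = 1024;                              // [256, 1280): OK
//     vkFlushMappedMemoryRanges(device, 1, &range);   // offset 0 or size 2048 would be flagged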

static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                     const VkMappedMemoryRange *pMemRanges) {
    bool skipCall = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
        if (mem_info) {
            if (mem_info->pData) {
                VkDeviceSize size = mem_info->memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_info->pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                for (auto j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                memcpy(mem_info->pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}
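
// Illustrative sketch (not part of the layer): the guard-band layout assumed by the checks
// above. For a non-coherent mapping of N bytes, the layer hands the application a pointer into
// the middle of a 2*N shadow allocation pre-filled with NoncoherentMemoryFillValue:
//
//     [0, N/2)          leading guard band  - must still hold the fill value
//     [N/2, N/2 + N)    application data    - copied to the driver pointer on flush
//     [N + N/2, 2*N)    trailing guard band - must still hold the fill value
//
// A write outside the returned pointer's range lands in a guard band and is reported as
// "Memory overflow" here.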

VkResult VKAPI_CALL
FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VkResult VKAPI_CALL
InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        // Track objects tied to memory
        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
        VkMemoryRequirements memRequirements;
        lock.unlock();
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        lock.lock();

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            const MEMORY_RANGE range =
                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, mem_info->imageRanges);
            skipCall |= validate_memory_range(dev_data, mem_info->bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
        }

        print_mem_list(dev_data);
        lock.unlock();
        if (!skipCall) {
            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
            lock.lock();
            dev_data->memObjMap[mem].get()->image = image;
            image_node->mem = mem;
            image_node->memOffset = memoryOffset;
            image_node->memSize = memRequirements.size;
            lock.unlock();
        }
    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been deleted?",
                reinterpret_cast<const uint64_t &>(image));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_node = dev_data->eventMap.find(event);
    if (event_node != dev_data->eventMap.end()) {
        event_node->second.needsSignaled = false;
        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_node->second.write_in_use) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
                                 reinterpret_cast<const uint64_t &>(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs a separate fix to verify that the app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip_call)
        result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pFence = getFenceNode(dev_data, fence);
    auto pQueue = getQueueNode(dev_data, queue);

    // First verify that fence is not in use
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (fence != VK_NULL_HANDLE) {
        SubmitFence(pQueue, pFence);
    }

    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
                                " that has no way to be signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                ", but that semaphore is already signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                dev_data->semaphoreMap[semaphore].signaled = true;
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();

    if (!skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}
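
// Illustrative sketch (not part of the layer): the semaphore bookkeeping above mirrors normal
// queue submission. Waiting on a semaphore that nothing has signaled (or will signal) trips
// DRAWSTATE_QUEUE_FORWARD_PROGRESS. Hypothetical application-side usage:
//
//     VkBindSparseInfo bind = {};
//     bind.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     bind.waitSemaphoreCount = 1;
//     bind.pWaitSemaphores = &sem;     // 'sem' must have a pending signal from earlier work
//     vkQueueBindSparse(queue, 1, &bind, VK_NULL_HANDLE);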
9791
9792VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
9793                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
9794    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9795    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
9796    if (result == VK_SUCCESS) {
9797        std::lock_guard<std::mutex> lock(global_lock);
9798        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
9799        sNode->signaled = false;
9800        sNode->queue = VK_NULL_HANDLE;
9801        sNode->in_use.store(0);
9802    }
9803    return result;
9804}
9805
VKAPI_ATTR VkResult VKAPI_CALL
CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].write_in_use = 0;
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

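// On success, begin tracking the new swapchain; its images are recorded later,
// when the application retrieves them via GetSwapchainImagesKHR.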
VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
    }

    return result;
}

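// Remove all tracking state tied to the swapchain's images (layouts, subresources,
// memory bindings, IMAGE_NODEs) before dropping the SWAPCHAIN_NODE itself.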
VKAPI_ATTR void VKAPI_CALL
DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                // These handles are VkImages, so clear their bindings under the image
                // object type, and accumulate (don't overwrite) the skip decision
                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
                dev_data->imageMap.erase(swapchain_image);
            }
        }
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

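// Record the returned swapchain images: each gets an IMAGE_NODE marked with the
// special MEMTRACKER_SWAP_CHAIN_IMAGE_KEY memory value, an initial UNDEFINED
// layout entry, and a reverse mapping back to its owning swapchain.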
VKAPI_ATTR VkResult VKAPI_CALL
GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by the parameter validation layer
        if (!pCount)
            return result;
        std::lock_guard<std::mutex> lock(global_lock);
        const size_t count = *pCount;
        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
        if (swapchain_node && !swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching data",
                        (uint64_t)(swapchain));
            }
        }
        // Guard against an unknown swapchain handle before dereferencing its node below
        if (swapchain_node) {
            for (uint32_t i = 0; i < *pCount; ++i) {
                IMAGE_LAYOUT_NODE image_layout_node;
                image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
                image_layout_node.format = swapchain_node->createInfo.imageFormat;
                // Add imageMap entries for each swapchain image
                VkImageCreateInfo image_ci = {};
                image_ci.mipLevels = 1;
                image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
                image_ci.usage = swapchain_node->createInfo.imageUsage;
                image_ci.format = swapchain_node->createInfo.imageFormat;
                image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
                image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
                image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
                dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(&image_ci));
                auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
                image_node->valid = false;
                image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
                swapchain_node->images.push_back(pSwapchainImages[i]);
                ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
                dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
                dev_data->imageLayoutMap[subpair] = image_layout_node;
                dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
            }
        }
    }
    return result;
}

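// Validate a present: wait semaphores must be signalable, and each presented image
// must be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR (memory validity is also checked when
// the mem_tracker merge is compiled in).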
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, reinterpret_cast<const uint64_t &>(semaphore),
                                __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueuePresentKHR: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
                                " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
                VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but are in %s",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);

    return result;
}

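// Track the acquire: mark the signal semaphore as signaled so a later wait is legal,
// and note the swapchain on the fence so fence state can reference it.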
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    if (semaphore != VK_NULL_HANDLE &&
        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].signaled) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].signaled = true;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    lock.unlock();

    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }

    return result;
}

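// Debug-report entry points: forward each call down the instance chain, and keep this
// layer's own callback list in sync so validation messages reach the new callback.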
VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

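// GetProcAddr interception is table-driven: core instance commands, core device
// commands, and WSI swapchain commands are looked up in the three tables below.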
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

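// Check the layer's own device-level and swapchain intercepts first; any name not
// handled here is resolved by the next layer in the chain.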
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

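// Instance-level queries also cover device commands and swapchain commands (with a
// null device) before falling back to debug-report and the next layer's dispatch.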
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (proc)
        return proc;

    assert(instance);

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (proc)
        return proc;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}

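// Return this layer's implementation of a core instance command, or nullptr if the
// name is not intercepted at instance scope.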
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

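// Return this layer's implementation of a core device command; the table below lists
// the device-level entry points this layer shadows.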
static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

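// VK_KHR_swapchain entry points are only returned for devices that enabled the WSI
// extension; with a null device the table is consulted unconditionally.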
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };

    if (dev) {
        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}