core_validation.cpp revision d7d60cccc862fee2d0b3ad410c5fdcc40ddc83ae
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;
// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

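// Illustrative usage note (editor's sketch, not part of the original layer source):
// each SPIRV instruction packs its word count into the high 16 bits of word 0 and
// its opcode into the low 16 bits, which is what len() and opcode() above decode.
// A typical traversal over a populated shader_module (defined below) looks like:
//
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpEntryPoint) {
//             // insn.word(n) reads the n-th word of this instruction
//         }
//     }
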
struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

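// Illustrative usage note (editor's sketch): def_index above lets type walks jump
// straight to an id's definition instead of rescanning the whole instruction
// stream. For example, looking through a pointer type to its pointee (word 3 of
// OpTypePointer):
//
//     auto ptr_type = module->get_def(type_id);
//     if (ptr_type != module->end() && ptr_type.opcode() == spv::OpTypePointer) {
//         auto pointee = module->get_def(ptr_type.word(3));
//     }
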
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else VK_NULL_HANDLE
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view create info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

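// Illustrative usage note (editor's sketch): with strict == VK_TRUE the object must
// carry every desired bit; otherwise any overlap passes. A hypothetical strict check
// that a buffer was created with transfer-source usage would look like:
//
//     validate_usage_flags(dev_data, actual_usage, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
//                          VK_TRUE, handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
//                          "buffer", "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
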
// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = getImageNode(dev_data, image);
    if (image_node) {
        skipCall = validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        skipCall = validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = getImageNode(dev_data, image);
        if (image_node && !image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using it.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using it.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = getImageNode(dev_data, image);
        if (image_node) {
            image_node->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                           ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->objBindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                        const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object 0x%" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->commandBufferBindings.size() + mem_info->objBindings.size());
        if (0 != mem_info->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->objBindings.size());
        if (mem_info->objBindings.size() > 0) {
            for (auto obj : mem_info->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->commandBufferBindings.size());
        if (mem_info->commandBufferBindings.size() > 0) {
            for (auto cb : mem_info->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

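// Editor's note: the 1u << insn.word(1) shift above relies on SPIR-V execution model
// values lining up with VkShaderStageFlagBits bit positions for the stages this layer
// handles, e.g. spv::ExecutionModelVertex == 0 maps to VK_SHADER_STAGE_VERTEX_BIT (0x1)
// and spv::ExecutionModelFragment == 4 maps to VK_SHADER_STAGE_FRAGMENT_BIT (0x10).
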
static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
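
// Illustrative note (editor's sketch): describe_type renders the human-readable type
// strings used in interface-mismatch diagnostics. A fragment shader input declared
// as a vec4 would typically render as:
//
//     ptr to input vec4 of float32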

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
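
// Illustrative note (editor's sketch): the 'relaxed' path above lets a producer
// write a wider vector than the consumer reads -- e.g. a vec4 output can satisfy a
// vec2 input -- while the strict path requires component counts to match exactly.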

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
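
// Illustrative note (editor's worked example): locations are 128 bits wide, so a
// dvec4 (four 64-bit components) consumes (64 * 4 + 127) / 128 = 2 locations, a
// vec3 of 32-bit floats consumes (32 * 3 + 127) / 128 = 1, and a mat4 of vec4
// columns consumes 4 * 1 = 4.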

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

1277static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1278    while (true) {
1279
1280        if (def.opcode() == spv::OpTypePointer) {
1281            def = src->get_def(def.word(3));
1282        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1283            def = src->get_def(def.word(2));
1284            is_array_of_verts = false;
1285        } else if (def.opcode() == spv::OpTypeStruct) {
1286            return def;
1287        } else {
1288            return src->end();
1289        }
1290    }
1291}
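
/* Sketch of the walk above for a typical arrayed interface block: the chain
 *   OpTypePointer -> OpTypeArray -> OpTypeStruct
 * yields the struct when is_array_of_verts is set; with it clear, the
 * OpTypeArray is not peeled and src->end() is returned, which callers treat
 * as "not an interface block". */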
1292
1293static void collect_interface_block_members(shader_module const *src,
1294                                            std::map<location_t, interface_var> &out,
1295                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1296                                            uint32_t id, uint32_t type_id, bool is_patch) {
1297    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1298    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1299    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1300        /* this isn't an interface block. */
1301        return;
1302    }
1303
1304    std::unordered_map<unsigned, unsigned> member_components;
1305
1306    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1307    for (auto insn : *src) {
1308        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1309            unsigned member_index = insn.word(2);
1310
1311            if (insn.word(3) == spv::DecorationComponent) {
1312                unsigned component = insn.word(4);
1313                member_components[member_index] = component;
1314            }
1315        }
1316    }
1317
1318    /* Second pass -- produce the output, from Location decorations */
1319    for (auto insn : *src) {
1320        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1321            unsigned member_index = insn.word(2);
1322            unsigned member_type_id = type.word(2 + member_index);
1323
1324            if (insn.word(3) == spv::DecorationLocation) {
1325                unsigned location = insn.word(4);
1326                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1327                auto component_it = member_components.find(member_index);
1328                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1329
1330                for (unsigned int offset = 0; offset < num_locations; offset++) {
1331                    interface_var v;
1332                    v.id = id;
1333                    /* TODO: member index in interface_var too? */
1334                    v.type_id = member_type_id;
1335                    v.offset = offset;
1336                    v.is_patch = is_patch;
1337                    v.is_block_member = true;
1338                    out[std::make_pair(location + offset, component)] = v;
1339                }
1340            }
1341        }
1342    }
1343}
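
/* Example (GLSL, for illustration only): given
 *   out Block { layout(location = 2) vec4 a; layout(location = 3) dvec3 b; } blk;
 * the second pass emits one interface_var per consumed location: key (2,0)
 * for `a`, and keys (3,0) and (4,0) for `b`, since a dvec3 consumes two
 * locations. */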
1344
1345static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1346                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1347                                          bool is_array_of_verts) {
1348    std::unordered_map<unsigned, unsigned> var_locations;
1349    std::unordered_map<unsigned, unsigned> var_builtins;
1350    std::unordered_map<unsigned, unsigned> var_components;
1351    std::unordered_map<unsigned, unsigned> blocks;
1352    std::unordered_map<unsigned, unsigned> var_patch;
1353
1354    for (auto insn : *src) {
1355
1356        /* We consider two interface models: SSO rendezvous-by-location, and
1357         * builtins. Complain about anything that fits neither model.
1358         */
1359        if (insn.opcode() == spv::OpDecorate) {
1360            if (insn.word(2) == spv::DecorationLocation) {
1361                var_locations[insn.word(1)] = insn.word(3);
1362            }
1363
1364            if (insn.word(2) == spv::DecorationBuiltIn) {
1365                var_builtins[insn.word(1)] = insn.word(3);
1366            }
1367
1368            if (insn.word(2) == spv::DecorationComponent) {
1369                var_components[insn.word(1)] = insn.word(3);
1370            }
1371
1372            if (insn.word(2) == spv::DecorationBlock) {
1373                blocks[insn.word(1)] = 1;
1374            }
1375
1376            if (insn.word(2) == spv::DecorationPatch) {
1377                var_patch[insn.word(1)] = 1;
1378            }
1379        }
1380    }
1381
1382    /* TODO: handle grouped decorations */
1383    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1384     * have the same location, and we DON'T want to clobber. */
1385
1386    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1387       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1388       the word to determine which word contains the terminator. */
1389    uint32_t word = 3;
1390    while (entrypoint.word(word) & 0xff000000u) {
1391        ++word;
1392    }
1393    ++word;
1394
1395    for (; word < entrypoint.len(); word++) {
1396        auto insn = src->get_def(entrypoint.word(word));
1397        assert(insn != src->end());
1398        assert(insn.opcode() == spv::OpVariable);
1399
1400        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1401            unsigned id = insn.word(2);
1402            unsigned type = insn.word(1);
1403
1404            int location = value_or_default(var_locations, id, -1);
1405            int builtin = value_or_default(var_builtins, id, -1);
1406            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1407            bool is_patch = var_patch.find(id) != var_patch.end();
1408
1409            /* All variables and interface block members in the Input or Output storage classes
1410             * must be decorated with either a builtin or an explicit location.
1411             *
1412             * TODO: integrate the interface block support here. For now, don't complain --
1413             * a valid SPIRV module will only hit this path for the interface block case, as the
1414             * individual members of the type are decorated, rather than variable declarations.
1415             */
1416
1417            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
1420                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1421                for (unsigned int offset = 0; offset < num_locations; offset++) {
1422                    interface_var v;
1423                    v.id = id;
1424                    v.type_id = type;
1425                    v.offset = offset;
1426                    v.is_patch = is_patch;
1427                    v.is_block_member = false;
1428                    out[std::make_pair(location + offset, component)] = v;
1429                }
1430            } else if (builtin == -1) {
1431                /* An interface block instance */
1432                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1433            }
1434        }
1435    }
1436}
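
/* Example of the resulting map (GLSL, for illustration only): given
 *   layout(location = 1, component = 2) out float f;
 *   layout(location = 4) out dvec4 g;
 * `out` receives an entry keyed (1,2) for `f`, and entries (4,0) and (5,0)
 * for `g` -- a dvec4 spans two locations, and the component defaults to 0
 * when not decorated. */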
1437
1438static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1439                                                 std::unordered_set<uint32_t> const &accessible_ids,
1440                                                 std::map<descriptor_slot_t, interface_var> &out) {
1441
1442    std::unordered_map<unsigned, unsigned> var_sets;
1443    std::unordered_map<unsigned, unsigned> var_bindings;
1444
1445    for (auto insn : *src) {
1446        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1447         * DecorationDescriptorSet and DecorationBinding.
1448         */
1449        if (insn.opcode() == spv::OpDecorate) {
1450            if (insn.word(2) == spv::DecorationDescriptorSet) {
1451                var_sets[insn.word(1)] = insn.word(3);
1452            }
1453
1454            if (insn.word(2) == spv::DecorationBinding) {
1455                var_bindings[insn.word(1)] = insn.word(3);
1456            }
1457        }
1458    }
1459
1460    for (auto id : accessible_ids) {
1461        auto insn = src->get_def(id);
1462        assert(insn != src->end());
1463
1464        if (insn.opcode() == spv::OpVariable &&
1465            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1466            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1467            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1468
1469            auto existing_it = out.find(std::make_pair(set, binding));
1470            if (existing_it != out.end()) {
                /* conflict within the SPIR-V module */
1472                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1473                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1474                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1475                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1476                        existing_it->first.second);
1477            }
1478
1479            interface_var v;
1480            v.id = insn.word(2);
1481            v.type_id = insn.word(1);
1482            v.offset = 0;
1483            v.is_patch = false;
1484            v.is_block_member = false;
1485            out[std::make_pair(set, binding)] = v;
1486        }
1487    }
1488}
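
/* Example (GLSL, for illustration only): `layout(set = 1, binding = 3)
 * uniform sampler2D tex;` produces an entry keyed (1,3); a second accessible
 * variable decorated with the same set and binding would take the
 * SHADER_CHECKER_INCONSISTENT_SPIRV path above. */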
1489
1490static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1491                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1492                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1493                                              shader_stage_attributes const *consumer_stage) {
1494    std::map<location_t, interface_var> outputs;
1495    std::map<location_t, interface_var> inputs;
1496
1497    bool pass = true;
1498
1499    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1500    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1501
1502    auto a_it = outputs.begin();
1503    auto b_it = inputs.begin();
1504
1505    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1507        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1508        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1509        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1510        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1511
1512        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1513            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1514                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1515                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1516                        a_first.second, consumer_stage->name)) {
1517                pass = false;
1518            }
1519            a_it++;
1520        } else if (a_at_end || a_first > b_first) {
1521            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1522                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1523                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1524                        producer_stage->name)) {
1525                pass = false;
1526            }
1527            b_it++;
1528        } else {
1529            // subtleties of arrayed interfaces:
1530            // - if is_patch, then the member is not arrayed, even though the interface may be.
1531            // - if is_block_member, then the extra array level of an arrayed interface is not
1532            //   expressed in the member type -- it's expressed in the block type.
1533            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1534                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1535                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1536                             true)) {
1537                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1538                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1539                            a_first.first, a_first.second,
1540                            describe_type(producer, a_it->second.type_id).c_str(),
1541                            describe_type(consumer, b_it->second.type_id).c_str())) {
1542                    pass = false;
1543                }
1544            }
1545            if (a_it->second.is_patch != b_it->second.is_patch) {
1546                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1547                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1548                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1549                            "per-%s in %s stage", a_first.first, a_first.second,
1550                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1551                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1552                    pass = false;
1553                }
1554            }
1555            a_it++;
1556            b_it++;
1557        }
1558    }
1559
1560    return pass;
1561}
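
/* Example of the merge walk above: if the producer (say, a VS) writes
 * locations {0, 1} and the consumer (FS) reads {1, 2}, location 0 raises the
 * non-fatal OUTPUT_NOT_CONSUMED performance warning, location 2 raises
 * INPUT_NOT_PRODUCED, and location 1 is type-checked via types_match. */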
1562
1563enum FORMAT_TYPE {
1564    FORMAT_TYPE_UNDEFINED,
1565    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1566    FORMAT_TYPE_SINT,
1567    FORMAT_TYPE_UINT,
1568};
1569
1570static unsigned get_format_type(VkFormat fmt) {
1571    switch (fmt) {
1572    case VK_FORMAT_UNDEFINED:
1573        return FORMAT_TYPE_UNDEFINED;
1574    case VK_FORMAT_R8_SINT:
1575    case VK_FORMAT_R8G8_SINT:
1576    case VK_FORMAT_R8G8B8_SINT:
1577    case VK_FORMAT_R8G8B8A8_SINT:
1578    case VK_FORMAT_R16_SINT:
1579    case VK_FORMAT_R16G16_SINT:
1580    case VK_FORMAT_R16G16B16_SINT:
1581    case VK_FORMAT_R16G16B16A16_SINT:
1582    case VK_FORMAT_R32_SINT:
1583    case VK_FORMAT_R32G32_SINT:
1584    case VK_FORMAT_R32G32B32_SINT:
1585    case VK_FORMAT_R32G32B32A32_SINT:
1586    case VK_FORMAT_R64_SINT:
1587    case VK_FORMAT_R64G64_SINT:
1588    case VK_FORMAT_R64G64B64_SINT:
1589    case VK_FORMAT_R64G64B64A64_SINT:
1590    case VK_FORMAT_B8G8R8_SINT:
1591    case VK_FORMAT_B8G8R8A8_SINT:
1592    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1593    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1594    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1595        return FORMAT_TYPE_SINT;
1596    case VK_FORMAT_R8_UINT:
1597    case VK_FORMAT_R8G8_UINT:
1598    case VK_FORMAT_R8G8B8_UINT:
1599    case VK_FORMAT_R8G8B8A8_UINT:
1600    case VK_FORMAT_R16_UINT:
1601    case VK_FORMAT_R16G16_UINT:
1602    case VK_FORMAT_R16G16B16_UINT:
1603    case VK_FORMAT_R16G16B16A16_UINT:
1604    case VK_FORMAT_R32_UINT:
1605    case VK_FORMAT_R32G32_UINT:
1606    case VK_FORMAT_R32G32B32_UINT:
1607    case VK_FORMAT_R32G32B32A32_UINT:
1608    case VK_FORMAT_R64_UINT:
1609    case VK_FORMAT_R64G64_UINT:
1610    case VK_FORMAT_R64G64B64_UINT:
1611    case VK_FORMAT_R64G64B64A64_UINT:
1612    case VK_FORMAT_B8G8R8_UINT:
1613    case VK_FORMAT_B8G8R8A8_UINT:
1614    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1615    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1616    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1617        return FORMAT_TYPE_UINT;
1618    default:
1619        return FORMAT_TYPE_FLOAT;
1620    }
1621}
1622
1623/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1624 * for comparison to a VkFormat's characterization above. */
1625static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1626    auto insn = src->get_def(type);
1627    assert(insn != src->end());
1628
1629    switch (insn.opcode()) {
1630    case spv::OpTypeInt:
1631        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1632    case spv::OpTypeFloat:
1633        return FORMAT_TYPE_FLOAT;
1634    case spv::OpTypeVector:
1635        return get_fundamental_type(src, insn.word(2));
1636    case spv::OpTypeMatrix:
1637        return get_fundamental_type(src, insn.word(2));
1638    case spv::OpTypeArray:
1639        return get_fundamental_type(src, insn.word(2));
1640    case spv::OpTypePointer:
1641        return get_fundamental_type(src, insn.word(3));
1642    default:
1643        return FORMAT_TYPE_UNDEFINED;
1644    }
1645}
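
/* Example: a `uvec3` is an OpTypeVector of OpTypeInt with signedness 0, so
 * the recursion bottoms out at FORMAT_TYPE_UINT; a `dmat3` recurses through
 * OpTypeMatrix and OpTypeVector down to OpTypeFloat -> FORMAT_TYPE_FLOAT. */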
1646
1647static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1648    uint32_t bit_pos = u_ffs(stage);
1649    return bit_pos - 1;
1650}
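
/* Example: VK_SHADER_STAGE_FRAGMENT_BIT is 0x10, so u_ffs returns 5 and the
 * stage id is 4 -- the bit position, suitable for indexing per-stage arrays. */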
1651
1652static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1653    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1654     * each binding should be specified only once.
1655     */
1656    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1657    bool pass = true;
1658
1659    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1660        auto desc = &vi->pVertexBindingDescriptions[i];
1661        auto &binding = bindings[desc->binding];
1662        if (binding) {
1663            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1664                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1665                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1666                pass = false;
1667            }
1668        } else {
1669            binding = desc;
1670        }
1671    }
1672
1673    return pass;
1674}
1675
1676static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1677                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1678    std::map<location_t, interface_var> inputs;
1679    bool pass = true;
1680
1681    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1682
1683    /* Build index by location */
1684    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1685    if (vi) {
1686        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1687            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1688            for (auto j = 0u; j < num_locations; j++) {
1689                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1690            }
1691        }
1692    }
1693
1694    auto it_a = attribs.begin();
1695    auto it_b = inputs.begin();
1696
1697    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1698        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1699        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1700        auto a_first = a_at_end ? 0 : it_a->first;
1701        auto b_first = b_at_end ? 0 : it_b->first.first;
1702        if (!a_at_end && (b_at_end || a_first < b_first)) {
1703            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1704                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1705                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1706                pass = false;
1707            }
1708            it_a++;
1709        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1710            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1712                        b_first)) {
1713                pass = false;
1714            }
1715            it_b++;
1716        } else {
1717            unsigned attrib_type = get_format_type(it_a->second->format);
1718            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1719
1720            /* type checking */
1721            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1722                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1723                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1724                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1725                            string_VkFormat(it_a->second->format), a_first,
1726                            describe_type(vs, it_b->second.type_id).c_str())) {
1727                    pass = false;
1728                }
1729            }
1730
1731            /* OK! */
1732            it_a++;
1733            it_b++;
1734        }
1735    }
1736
1737    return pass;
1738}
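
/* Example: a VK_FORMAT_R64G64B64A64_SFLOAT attribute at location 2 is indexed
 * above under locations 2 and 3, so a dvec4 VS input at location 2 -- which
 * also consumes two locations -- matches cleanly rather than producing a
 * spurious "not provided" error for location 3. */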
1739
1740static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1741                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1742    std::map<location_t, interface_var> outputs;
1743    std::map<uint32_t, VkFormat> color_attachments;
1744    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1745        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1746            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1747        }
1748    }
1749
1750    bool pass = true;
1751
1752    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1753
1754    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1755
1756    auto it_a = outputs.begin();
1757    auto it_b = color_attachments.begin();
1758
1759    /* Walk attachment list and outputs together */
1760
1761    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1762        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1763        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1764
1765        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1766            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1767                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1768                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1769                pass = false;
1770            }
1771            it_a++;
1772        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1773            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1774                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1775                pass = false;
1776            }
1777            it_b++;
1778        } else {
1779            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1780            unsigned att_type = get_format_type(it_b->second);
1781
1782            /* type checking */
1783            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1784                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1785                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1786                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1787                            string_VkFormat(it_b->second),
1788                            describe_type(fs, it_a->second.type_id).c_str())) {
1789                    pass = false;
1790                }
1791            }
1792
1793            /* OK! */
1794            it_a++;
1795            it_b++;
1796        }
1797    }
1798
1799    return pass;
1800}
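
/* Example: a subpass with color attachments {0: R8G8B8A8_UNORM, 1: R32_SINT}
 * expects the FS to write location 0 with a float-class type and location 1
 * with a signed-integer type; an unwritten attachment raises
 * INPUT_NOT_PRODUCED, and a float output against R32_SINT raises
 * INTERFACE_TYPE_MISMATCH. */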
1801
1802/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1803 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1804 * for example.
1805 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1806 *  - NOT the shader input/output interfaces.
1807 *
1808 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1809 * converting parts of this to be generated from the machine-readable spec instead.
1810 */
1811static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1812    std::unordered_set<uint32_t> worklist;
1813    worklist.insert(entrypoint.word(2));
1814
1815    while (!worklist.empty()) {
1816        auto id_iter = worklist.begin();
1817        auto id = *id_iter;
1818        worklist.erase(id_iter);
1819
1820        auto insn = src->get_def(id);
1821        if (insn == src->end()) {
1822            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1823             * across all kinds of things here that we may not care about. */
1824            continue;
1825        }
1826
1827        /* try to add to the output set */
1828        if (!ids.insert(id).second) {
1829            continue; /* if we already saw this id, we don't want to walk it again. */
1830        }
1831
1832        switch (insn.opcode()) {
1833        case spv::OpFunction:
1834            /* scan whole body of the function, enlisting anything interesting */
1835            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1836                switch (insn.opcode()) {
1837                case spv::OpLoad:
1838                case spv::OpAtomicLoad:
1839                case spv::OpAtomicExchange:
1840                case spv::OpAtomicCompareExchange:
1841                case spv::OpAtomicCompareExchangeWeak:
1842                case spv::OpAtomicIIncrement:
1843                case spv::OpAtomicIDecrement:
1844                case spv::OpAtomicIAdd:
1845                case spv::OpAtomicISub:
1846                case spv::OpAtomicSMin:
1847                case spv::OpAtomicUMin:
1848                case spv::OpAtomicSMax:
1849                case spv::OpAtomicUMax:
1850                case spv::OpAtomicAnd:
1851                case spv::OpAtomicOr:
1852                case spv::OpAtomicXor:
1853                    worklist.insert(insn.word(3)); /* ptr */
1854                    break;
1855                case spv::OpStore:
1856                case spv::OpAtomicStore:
1857                    worklist.insert(insn.word(1)); /* ptr */
1858                    break;
1859                case spv::OpAccessChain:
1860                case spv::OpInBoundsAccessChain:
1861                    worklist.insert(insn.word(3)); /* base ptr */
1862                    break;
1863                case spv::OpSampledImage:
1864                case spv::OpImageSampleImplicitLod:
1865                case spv::OpImageSampleExplicitLod:
1866                case spv::OpImageSampleDrefImplicitLod:
1867                case spv::OpImageSampleDrefExplicitLod:
1868                case spv::OpImageSampleProjImplicitLod:
1869                case spv::OpImageSampleProjExplicitLod:
1870                case spv::OpImageSampleProjDrefImplicitLod:
1871                case spv::OpImageSampleProjDrefExplicitLod:
1872                case spv::OpImageFetch:
1873                case spv::OpImageGather:
1874                case spv::OpImageDrefGather:
1875                case spv::OpImageRead:
1876                case spv::OpImage:
1877                case spv::OpImageQueryFormat:
1878                case spv::OpImageQueryOrder:
1879                case spv::OpImageQuerySizeLod:
1880                case spv::OpImageQuerySize:
1881                case spv::OpImageQueryLod:
1882                case spv::OpImageQueryLevels:
1883                case spv::OpImageQuerySamples:
1884                case spv::OpImageSparseSampleImplicitLod:
1885                case spv::OpImageSparseSampleExplicitLod:
1886                case spv::OpImageSparseSampleDrefImplicitLod:
1887                case spv::OpImageSparseSampleDrefExplicitLod:
1888                case spv::OpImageSparseSampleProjImplicitLod:
1889                case spv::OpImageSparseSampleProjExplicitLod:
1890                case spv::OpImageSparseSampleProjDrefImplicitLod:
1891                case spv::OpImageSparseSampleProjDrefExplicitLod:
1892                case spv::OpImageSparseFetch:
1893                case spv::OpImageSparseGather:
1894                case spv::OpImageSparseDrefGather:
1895                case spv::OpImageTexelPointer:
1896                    worklist.insert(insn.word(3)); /* image or sampled image */
1897                    break;
1898                case spv::OpImageWrite:
1899                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1900                    break;
1901                case spv::OpFunctionCall:
1902                    for (uint32_t i = 3; i < insn.len(); i++) {
1903                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1904                    }
1905                    break;
1906
1907                case spv::OpExtInst:
1908                    for (uint32_t i = 5; i < insn.len(); i++) {
1909                        worklist.insert(insn.word(i)); /* operands to ext inst */
1910                    }
1911                    break;
1912                }
1913            }
1914            break;
1915        }
1916    }
1917}
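
/* Sketch of the closure this computes: starting from the entrypoint's
 * function id, an OpFunctionCall enlists the callee and its arguments, an
 * OpLoad enlists the pointer it reads, and an OpAccessChain enlists its base
 * pointer -- so a descriptor-backed variable reached only through a helper
 * function still ends up in `ids`. */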
1918
1919static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1920                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1921                                                          shader_module const *src, spirv_inst_iter type,
1922                                                          VkShaderStageFlagBits stage) {
1923    bool pass = true;
1924
1925    /* strip off ptrs etc */
1926    type = get_struct_type(src, type, false);
1927    assert(type != src->end());
1928
1929    /* validate directly off the offsets. this isn't quite correct for arrays
1930     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1931     * sizes */
1932    for (auto insn : *src) {
1933        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1934
1935            if (insn.word(3) == spv::DecorationOffset) {
1936                unsigned offset = insn.word(4);
1937                auto size = 4; /* bytes; TODO: calculate this based on the type */
1938
1939                bool found_range = false;
1940                for (auto const &range : *pushConstantRanges) {
1941                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1942                        found_range = true;
1943
1944                        if ((range.stageFlags & stage) == 0) {
1945                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1946                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1947                                        "Push constant range covering variable starting at "
1948                                        "offset %u not accessible from stage %s",
1949                                        offset, string_VkShaderStageFlagBits(stage))) {
1950                                pass = false;
1951                            }
1952                        }
1953
1954                        break;
1955                    }
1956                }
1957
1958                if (!found_range) {
1959                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1960                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1961                                "Push constant range covering variable starting at "
1962                                "offset %u not declared in layout",
1963                                offset)) {
1964                        pass = false;
1965                    }
1966                }
1967            }
1968        }
1969    }
1970
1971    return pass;
1972}
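
/* Example: with a single range { VK_SHADER_STAGE_VERTEX_BIT, 0, 16 }, a block
 * member decorated with Offset 8 is covered (0 <= 8 and 0 + 16 >= 8 + 4), but
 * validating the fragment stage against it reports
 * SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE; a member at Offset
 * 20 falls in no range and reports SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE. */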
1973
static bool validate_push_constant_usage(debug_report_data *report_data,
                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
1977    bool pass = true;
1978
1979    for (auto id : accessible_ids) {
1980        auto def_insn = src->get_def(id);
1981        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1982            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
1983                                                                 src->get_def(def_insn.word(1)), stage);
1984        }
1985    }
1986
1987    return pass;
1988}
1989
1990// For given pipelineLayout verify that the set_layout_node at slot.first
1991//  has the requested binding at slot.second and return ptr to that binding
1992static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
1993
1994    if (!pipelineLayout)
1995        return nullptr;
1996
1997    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
1998        return nullptr;
1999
2000    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2001}
2002
2003// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2004
2005static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2006
2007// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2008//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2009//   to that same cmd buffer by separate thread are not changing state from underneath us
2010// Track the last cmd buffer touched by this thread
2011
2012static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2013    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2014        if (pCB->drawCount[i])
2015            return true;
2016    }
2017    return false;
2018}
2019
2020// Check object status for selected flag state
2021static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2022                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2023    if (!(pNode->status & status_mask)) {
2024        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2025                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2026                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2027    }
2028    return false;
2029}
2030
2031// Retrieve pipeline node ptr for given pipeline object
2032static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2033    auto it = my_data->pipelineMap.find(pipeline);
2034    if (it == my_data->pipelineMap.end()) {
2035        return nullptr;
2036    }
2037    return it->second;
2038}
2039
2040static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2041    auto it = my_data->renderPassMap.find(renderpass);
2042    if (it == my_data->renderPassMap.end()) {
2043        return nullptr;
2044    }
2045    return it->second;
2046}
2047
2048static FRAMEBUFFER_NODE *getFramebuffer(layer_data *my_data, VkFramebuffer framebuffer) {
2049    auto it = my_data->frameBufferMap.find(framebuffer);
2050    if (it == my_data->frameBufferMap.end()) {
2051        return nullptr;
2052    }
2053    return &it->second;
2054}
2055
2056cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2057    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2058    if (it == my_data->descriptorSetLayoutMap.end()) {
2059        return nullptr;
2060    }
2061    return it->second;
2062}
2063
2064static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2065    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2066    if (it == my_data->pipelineLayoutMap.end()) {
2067        return nullptr;
2068    }
2069    return &it->second;
2070}
2071
2072// Return true if for a given PSO, the given state enum is dynamic, else return false
2073static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2074    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2075        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2076            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2077                return true;
2078        }
2079    }
2080    return false;
2081}
2082
2083// Validate state stored as flags at time of draw call
2084static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
    bool result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
2088    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2089                              "Dynamic scissor state not set for this command buffer");
2090    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2091        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2092         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2093        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2094                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2095    }
2096    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2097        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2098        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2099                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2100    }
2101    if (pPipe->blendConstantsEnabled) {
2102        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2103                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2104    }
2105    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2106        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2107        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2108                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2109    }
2110    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2111        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2112        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2113                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2114        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2115                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2116        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2117                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2118    }
2119    if (indexedDraw) {
2120        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2121                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2122                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2123    }
2124    return result;
2125}
2126
// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
2132static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2133                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2134                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2135                                             const VkAttachmentDescription *pSecondaryAttachments) {
2136    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2137        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2138            return true;
2139    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2140        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2141            return true;
2142    } else { // format and sample count must match
2143        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2144             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2145            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2146             pSecondaryAttachments[pSecondary[index].attachment].samples))
2147            return true;
2148    }
2149    // Format and sample counts didn't match
    // Not compatible: either a reference that should be VK_ATTACHMENT_UNUSED wasn't, or format/sample counts differ
2151}
2152
// For given primary and secondary RenderPass objects, verify that they're compatible
2154static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2155                                            string &errorMsg) {
2156    auto primary_render_pass = getRenderPass(my_data, primaryRP);
2157    auto secondary_render_pass = getRenderPass(my_data, secondaryRP);
2158
2159    if (!primary_render_pass) {
2160        stringstream errorStr;
2161        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2162        errorMsg = errorStr.str();
2163        return false;
2164    }
2165
2166    if (!secondary_render_pass) {
2167        stringstream errorStr;
2168        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2169        errorMsg = errorStr.str();
2170        return false;
2171    }
2172    // Trivial pass case is exact same RP
2173    if (primaryRP == secondaryRP) {
2174        return true;
2175    }
2176    const VkRenderPassCreateInfo *primaryRPCI = primary_render_pass->pCreateInfo;
2177    const VkRenderPassCreateInfo *secondaryRPCI = secondary_render_pass->pCreateInfo;
2178    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2179        stringstream errorStr;
2180        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2181                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2182        errorMsg = errorStr.str();
2183        return false;
2184    }
2185    uint32_t spIndex = 0;
2186    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2187        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2188        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2189        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2190        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2191        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2192            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2193                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2194                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2195                stringstream errorStr;
2196                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2197                errorMsg = errorStr.str();
2198                return false;
2199            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2200                                                         primaryColorCount, primaryRPCI->pAttachments,
2201                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2202                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2203                stringstream errorStr;
2204                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2205                errorMsg = errorStr.str();
2206                return false;
2207            }
2208        }
2209
        // pDepthStencilAttachment is optional; pass a count of 0 for a null reference so it is never dereferenced
        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1 : 0,
                                              primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1 : 0,
                                              secondaryRPCI->pAttachments)) {
2214            stringstream errorStr;
2215            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2216            errorMsg = errorStr.str();
2217            return false;
2218        }
2219
2220        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2221        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2222        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2223        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2227                stringstream errorStr;
2228                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2229                errorMsg = errorStr.str();
2230                return false;
2231            }
2232        }
2233    }
2234    return true;
2235}
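
// Example: two render passes created from identical VkRenderPassCreateInfo
// structures compare compatible subpass-by-subpass even though their handles
// differ; a primary pass with 2 subpasses and a secondary pass with 1 fails
// immediately on the subpassCount check above.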
2236
2237// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2238// pipelineLayout[layoutIndex]
2239static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2240                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2241    auto pipeline_layout = getPipelineLayout(my_data, layout);
2242    if (!pipeline_layout) {
2243        stringstream errorStr;
2244        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2245        errorMsg = errorStr.str();
2246        return false;
2247    }
2248    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
2249        stringstream errorStr;
2250        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
2251                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
2252                 << ", but you're attempting to bind set to index " << layoutIndex;
2253        errorMsg = errorStr.str();
2254        return false;
2255    }
2256    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
2257    return pSet->IsCompatible(layout_node, &errorMsg);
2258}
2259
2260// Validate that data for each specialization entry is fully contained within the buffer.
2261static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2262    bool pass = true;
2263
2264    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2265
2266    if (spec) {
2267        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2268            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2269                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2270                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2271                            "Specialization entry %u (for constant id %u) references memory outside provided "
2272                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2273                            " bytes provided)",
2274                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2275                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2276
2277                    pass = false;
2278                }
2279            }
2280        }
2281    }
2282
2283    return pass;
2284}
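
/* Example: with dataSize = 8, a map entry { constantID = 0, offset = 6,
 * size = 4 } ends at byte 9, past the provided data (6 + 4 > 8), and is
 * reported as SHADER_CHECKER_BAD_SPECIALIZATION. */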
2285
2286static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2287                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2288    auto type = module->get_def(type_id);
2289
2290    descriptor_count = 1;
2291
2292    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2293     * descriptor count for each dimension. */
2294    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2295        if (type.opcode() == spv::OpTypeArray) {
2296            descriptor_count *= get_constant_value(module, type.word(3));
2297            type = module->get_def(type.word(2));
2298        }
2299        else {
2300            type = module->get_def(type.word(3));
2301        }
2302    }
2303
2304    switch (type.opcode()) {
2305    case spv::OpTypeStruct: {
2306        for (auto insn : *module) {
2307            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2308                if (insn.word(2) == spv::DecorationBlock) {
2309                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2310                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2311                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2312                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2313                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2314                }
2315            }
2316        }
2317
2318        /* Invalid */
2319        return false;
2320    }
2321
2322    case spv::OpTypeSampler:
2323        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2324
2325    case spv::OpTypeSampledImage:
2326        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2327            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2328             * doesn't really have a sampler, and a texel buffer descriptor
2329             * doesn't really provide one. Allow this slight mismatch.
2330             */
2331            auto image_type = module->get_def(type.word(2));
2332            auto dim = image_type.word(3);
2333            auto sampled = image_type.word(7);
2334            return dim == spv::DimBuffer && sampled == 1;
2335        }
2336        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2337
2338    case spv::OpTypeImage: {
2339        /* Many descriptor types backing image types-- depends on dimension
        /* Many descriptor types can back an image type -- it depends on the
         * dimension and on whether the image will be used with a sampler.
         * SPIR-V for Vulkan requires Sampled to be 1 or 2 -- deferring the
         * decision to runtime is not permitted.
         */
2345        auto sampled = type.word(7);
2346
2347        if (dim == spv::DimSubpassData) {
2348            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2349        } else if (dim == spv::DimBuffer) {
2350            if (sampled == 1) {
2351                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2352            } else {
2353                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2354            }
2355        } else if (sampled == 1) {
2356            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2357        } else {
2358            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2359        }
2360    }
2361
2362    /* We shouldn't really see any other junk types -- but if we do, they're
2363     * a mismatch.
2364     */
2365    default:
2366        return false; /* Mismatch */
2367    }
2368}
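
/* Sketch of the mapping above (GLSL names shown for readability; not
 * exhaustive):
 *   uniform UBO { ... }     Block decoration   -> UNIFORM_BUFFER[_DYNAMIC]
 *   buffer SSBO { ... }     BufferBlock        -> STORAGE_BUFFER[_DYNAMIC]
 *   sampler2D               OpTypeSampledImage -> COMBINED_IMAGE_SAMPLER
 *   texture2D (Sampled=1)   OpTypeImage        -> SAMPLED_IMAGE
 *   image2D (Sampled=2)     OpTypeImage        -> STORAGE_IMAGE
 *   subpassInput            DimSubpassData     -> INPUT_ATTACHMENT
 * An arrayed binding such as `sampler2D tex[4]` sets descriptor_count to 4. */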
2369
2370static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2371    if (!feature) {
2372        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2373                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2374                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2375                    "enabled on the device",
2376                    feature_name)) {
2377            return false;
2378        }
2379    }
2380
2381    return true;
2382}
2383
2384static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2385                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
    bool pass = true;

2389    for (auto insn : *src) {
2390        if (insn.opcode() == spv::OpCapability) {
2391            switch (insn.word(1)) {
2392            case spv::CapabilityMatrix:
2393            case spv::CapabilityShader:
2394            case spv::CapabilityInputAttachment:
2395            case spv::CapabilitySampled1D:
2396            case spv::CapabilityImage1D:
2397            case spv::CapabilitySampledBuffer:
2398            case spv::CapabilityImageBuffer:
2399            case spv::CapabilityImageQuery:
2400            case spv::CapabilityDerivativeControl:
2401                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2402                break;
2403
2404            case spv::CapabilityGeometry:
2405                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2406                break;
2407
2408            case spv::CapabilityTessellation:
2409                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2410                break;
2411
2412            case spv::CapabilityFloat64:
2413                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2414                break;
2415
2416            case spv::CapabilityInt64:
2417                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2418                break;
2419
2420            case spv::CapabilityTessellationPointSize:
2421            case spv::CapabilityGeometryPointSize:
2422                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2423                                        "shaderTessellationAndGeometryPointSize");
2424                break;
2425
2426            case spv::CapabilityImageGatherExtended:
2427                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2428                break;
2429
2430            case spv::CapabilityStorageImageMultisample:
2431                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2432                break;
2433
2434            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2435                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2436                                        "shaderUniformBufferArrayDynamicIndexing");
2437                break;
2438
2439            case spv::CapabilitySampledImageArrayDynamicIndexing:
2440                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2441                                        "shaderSampledImageArrayDynamicIndexing");
2442                break;
2443
2444            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2445                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2446                                        "shaderStorageBufferArrayDynamicIndexing");
2447                break;
2448
2449            case spv::CapabilityStorageImageArrayDynamicIndexing:
2450                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2451                                        "shaderStorageImageArrayDynamicIndexing");
2452                break;
2453
2454            case spv::CapabilityClipDistance:
2455                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2456                break;
2457
2458            case spv::CapabilityCullDistance:
2459                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2460                break;
2461
2462            case spv::CapabilityImageCubeArray:
2463                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2464                break;
2465
2466            case spv::CapabilitySampleRateShading:
2467                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2468                break;
2469
2470            case spv::CapabilitySparseResidency:
2471                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2472                break;
2473
2474            case spv::CapabilityMinLod:
2475                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2476                break;
2477
2478            case spv::CapabilitySampledCubeArray:
2479                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2480                break;
2481
2482            case spv::CapabilityImageMSArray:
2483                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2484                break;
2485
2486            case spv::CapabilityStorageImageExtendedFormats:
2487                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2488                                        "shaderStorageImageExtendedFormats");
2489                break;
2490
2491            case spv::CapabilityInterpolationFunction:
2492                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2493                break;
2494
2495            case spv::CapabilityStorageImageReadWithoutFormat:
2496                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2497                                        "shaderStorageImageReadWithoutFormat");
2498                break;
2499
2500            case spv::CapabilityStorageImageWriteWithoutFormat:
2501                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2502                                        "shaderStorageImageWriteWithoutFormat");
2503                break;
2504
2505            case spv::CapabilityMultiViewport:
2506                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2507                break;
2508
2509            default:
2510                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2511                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2512                            "Shader declares capability %u, not supported in Vulkan.",
2513                            insn.word(1)))
2514                    pass = false;
2515                break;
2516            }
2517        }
2518    }
2519
2520    return pass;
2521}
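// Note: capability enums not handled in the switch above (e.g. capabilities added by
// SPIR-V extensions) fall through to the default case and are reported as unsupported.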
2522
2523static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2524                                           VkPipelineShaderStageCreateInfo const *pStage,
2525                                           PIPELINE_NODE *pipeline,
2526                                           shader_module **out_module,
2527                                           spirv_inst_iter *out_entrypoint,
2528                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2529                                           std::unordered_map<VkShaderModule,
2530                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2531    bool pass = true;
2532    auto module_it = shaderModuleMap.find(pStage->module);
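    // NOTE: assumes pStage->module was recorded in shaderModuleMap at
    // vkCreateShaderModule time; no end() check is done before dereferencing.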
2533    auto module = *out_module = module_it->second.get();
2534    pass &= validate_specialization_offsets(report_data, pStage);
2535
2536    /* find the entrypoint */
2537    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2538    if (entrypoint == module->end()) {
2539        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2540                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2541                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2542                    string_VkShaderStageFlagBits(pStage->stage))) {
2543            pass = false;
2544        }
2545    }
2546
2547    /* validate shader capabilities against enabled device features */
2548    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2549
2550    /* mark accessible ids */
2551    std::unordered_set<uint32_t> accessible_ids;
2552    mark_accessible_ids(module, entrypoint, accessible_ids);
2553
2554    /* validate descriptor set layout against what the entrypoint actually uses */
2555    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2556    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2557
2558    auto pipelineLayout = pipeline->pipelineLayout;
2559
2560    /* validate push constant usage */
2561    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2562                                        module, accessible_ids, pStage->stage);
2563
2564    /* validate descriptor use */
2565    for (auto use : descriptor_uses) {
2566        // While validating shaders capture which slots are used by the pipeline
2567        pipeline->active_slots[use.first.first].insert(use.first.second);
2568
2569        /* verify given pipelineLayout has requested setLayout with requested binding */
2570        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2571        unsigned required_descriptor_count;
2572
2573        if (!binding) {
2574            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2575                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2576                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2577                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2578                pass = false;
2579            }
2580        } else if (~binding->stageFlags & pStage->stage) {
2581            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2582                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2583                        "Shader uses descriptor slot %u.%u (used "
2584                        "as type `%s`) but descriptor not "
2585                        "accessible from stage %s",
2586                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2587                        string_VkShaderStageFlagBits(pStage->stage))) {
2588                pass = false;
2589            }
2590        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2591                                          /*out*/ required_descriptor_count)) {
2592            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2593                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2594                                                                       "%u.%u: shader uses type `%s` but the "
2595                                                                       "pipeline layout declares descriptor type %s",
2596                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2597                        string_VkDescriptorType(binding->descriptorType))) {
2598                pass = false;
2599            }
2600        } else if (binding->descriptorCount < required_descriptor_count) {
2601            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2602                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2603                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2604                        required_descriptor_count, use.first.first, use.first.second,
2605                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2606                pass = false;
2607            }
2608        }
2609    }
2610
2611    return pass;
2612}
2613
2614
2615// Validate the shaders used by the given pipeline and store the descriptor slots
2616//  that are actually used by the pipeline into pPipeline->active_slots
2617static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2618                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2619                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2620    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2621    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2622    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2623
2624    shader_module *shaders[5];
2625    memset(shaders, 0, sizeof(shaders));
2626    spirv_inst_iter entrypoints[5];
2627    memset(entrypoints, 0, sizeof(entrypoints));
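    // NOTE: zero-filling entrypoints[] with memset assumes spirv_inst_iter is
    // trivially copyable; only entries whose shaders[] slot gets filled are read later.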
2628    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2629    bool pass = true;
2630
2631    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2632        auto pStage = &pCreateInfo->pStages[i];
2633        auto stage_id = get_shader_stage_id(pStage->stage);
2634        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2635                                               &shaders[stage_id], &entrypoints[stage_id],
2636                                               enabledFeatures, shaderModuleMap);
2637    }
2638
2639    vi = pCreateInfo->pVertexInputState;
2640
2641    if (vi) {
2642        pass &= validate_vi_consistency(report_data, vi);
2643    }
2644
2645    if (shaders[vertex_stage]) {
2646        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2647    }
2648
2649    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2650    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2651
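    // Walk the stage array, matching each active stage's outputs against the inputs
    // of the next active stage; inactive (null) slots are skipped on both sides.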
2652    while (!shaders[producer] && producer != fragment_stage) {
2653        producer++;
2654        consumer++;
2655    }
2656
2657    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2658        assert(shaders[producer]);
2659        if (shaders[consumer]) {
2660            pass &= validate_interface_between_stages(report_data,
2661                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2662                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2663
2664            producer = consumer;
2665        }
2666    }
2667
2668    if (shaders[fragment_stage] && pPipeline->renderPass) {
2669        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2670                                                        pPipeline->renderPass, pCreateInfo->subpass);
2671    }
2672
2673    return pass;
2674}
2675
2676static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2677                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2678    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2679
2680    shader_module *module;
2681    spirv_inst_iter entrypoint;
2682
2683    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2684                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2685}
2686// Return Set node ptr for specified set or else NULL
2687cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2688    auto set_it = my_data->setMap.find(set);
2689    if (set_it == my_data->setMap.end()) {
2690        return NULL;
2691    }
2692    return set_it->second;
2693}
2694// For the given command buffer, verify and update the state for activeSetBindingsPairs
2695//  This includes:
2696//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2697//     To be valid, the dynamic offset combined with the offset and range from its
2698//     descriptor update must not overflow the size of its buffer being updated
2699//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2700//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
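//  Example for (1): given a 512-byte buffer and a descriptor written with offset 256 and
//  range 128, dynamic offsets 0 through 128 are valid; anything larger overflows the buffer.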
2701static bool validate_and_update_drawtime_descriptor_state(
2702    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2703    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2704                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
2705    bool result = false;
2706    for (auto set_bindings_pair : activeSetBindingsPairs) {
2707        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2708        std::string err_str;
2709        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2710                                         &err_str)) {
2711            // Report error here
2712            auto set = set_node->GetSet();
2713            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2714                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2715                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2716                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2717        }
2718        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2719    }
2720    return result;
2721}
2722
2723// For the given pipeline, return the number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2724static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2725    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2726        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2727        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2728    }
2729    return VK_SAMPLE_COUNT_1_BIT;
2730}
2731
2732// Validate draw-time state related to the PSO
2733static bool validatePipelineDrawtimeState(layer_data const *my_data,
2734                                          LAST_BOUND_STATE const &state,
2735                                          const GLOBAL_CB_NODE *pCB,
2736                                          PIPELINE_NODE const *pPipeline) {
2737    bool skip_call = false;
2738
2739    // Verify Vtx binding
2740    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2741        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2742            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2743                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2744                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2745                                  "The Pipeline State Object (0x%" PRIxLEAST64
2746                                  ") expects this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
2747                                  " to be set via vkCmdBindVertexBuffers.",
2748                                  (uint64_t)state.pipeline, i);
2749            }
2750        }
2751    } else {
2752        if (!pCB->currentDrawData.buffers.empty()) {
2753            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2754                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2755                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2756                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2757                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2758        }
2759    }
2760    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2761    // Skip check if rasterization is disabled or there is no viewport.
2762    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2763         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2764        pPipeline->graphicsPipelineCI.pViewportState) {
2765        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2766        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2767        if (dynViewport) {
2768            if (pCB->viewports.size() != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
2769                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2770                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2771                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2772                                  ", but PSO viewportCount is %u. These counts must match.",
2773                                  pCB->viewports.size(), pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
2774            }
2775        }
2776        if (dynScissor) {
2777            if (pCB->scissors.size() != pPipeline->graphicsPipelineCI.pViewportState->scissorCount) {
2778                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2779                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2780                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2781                                  ", but PSO scissorCount is %u. These counts must match.",
2782                                  pCB->scissors.size(), pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
2783            }
2784        }
2785    }
2786
2787    // Verify that any MSAA request in PSO matches sample# in bound FB
2788    // Skip the check if rasterization is disabled.
2789    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2790        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2791        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2792        if (pCB->activeRenderPass) {
2793            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2794            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2795            VkSampleCountFlagBits subpass_num_samples = VkSampleCountFlagBits(0);
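            // subpass_num_samples: 0 means no attachment examined yet; -1 (all bits set)
            // marks disagreeing attachments, which can never equal the PSO sample count.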
2796            uint32_t i;
2797
2798            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2799            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2800                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2801                skip_call |=
2802                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2803                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2804                                "Render pass subpass %u: blend state attachment count %u does not match "
2805                                "subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
2806                                "must be the same at draw-time.",
2807                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2808                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2809            }
2810
2811            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2812                VkSampleCountFlagBits samples;
2813
2814                if (subpass_desc->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
2815                    continue;
2816
2817                samples = render_pass_info->pAttachments[subpass_desc->pColorAttachments[i].attachment].samples;
2818                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0)) {
2819                    subpass_num_samples = samples;
2820                } else if (subpass_num_samples != samples) {
2821                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2822                    break;
2823                }
2824            }
2825            if ((subpass_desc->pDepthStencilAttachment != NULL) &&
2826                (subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
2827                const VkSampleCountFlagBits samples =
2828                        render_pass_info->pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples;
2829                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0))
2830                    subpass_num_samples = samples;
2831                else if (subpass_num_samples != samples)
2832                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2833            }
2834
2835            if (((subpass_desc->colorAttachmentCount > 0) || (subpass_desc->pDepthStencilAttachment != NULL)) &&
2836                (pso_num_samples != subpass_num_samples)) {
2837                skip_call |=
2838                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2839                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2840                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
2841                                ") uses %u samples but the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
2842                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2843                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2844            }
2845        } else {
2846            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2847                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2848                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2849                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2850        }
2851    }
2852    // TODO : Add more checks here
2853
2854    return skip_call;
2855}
2856
2857// Validate overall state at the time of a draw call
2858static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2859                                           const VkPipelineBindPoint bindPoint) {
2860    bool result = false;
2861    auto const &state = pCB->lastBound[bindPoint];
2862    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2863    if (nullptr == pPipe) {
2864        result |= log_msg(
2865            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2866            DRAWSTATE_INVALID_PIPELINE, "DS",
2867            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2868        // Always return early: the checks below dereference pPipe and would crash on a
2869        // null pipeline, even if the error message above was filtered by callback settings.
2870        return result;
2871    }
2872    // First check flag states
2873    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2874        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2875
2876    // Now complete other state checks
2877    if (state.pipelineLayout) {
2878        string errorString;
2879        auto pipelineLayout = (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) ? pPipe->graphicsPipelineCI.layout : pPipe->computePipelineCI.layout;
2880
2881        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2882        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
2883        for (auto & setBindingPair : pPipe->active_slots) {
2884            uint32_t setIndex = setBindingPair.first;
2885            // If a valid set is not bound, flag an error
2886            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2887                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2888                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2889                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2890                                  setIndex);
2891            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2892                                                        pipelineLayout, setIndex, errorString)) {
2893                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2894                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2895                result |=
2896                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2897                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2898                            "VkDescriptorSet (0x%" PRIxLEAST64
2899                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2900                            (uint64_t)setHandle, setIndex, (uint64_t)pipelineLayout, errorString.c_str());
2901            } else { // Valid set is bound and layout compatible, validate that it's updated
2902                // Pull the set node
2903                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2904                // Save vector of all active sets to verify dynamicOffsets below
2905                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2906                                                                 &state.dynamicOffsets[setIndex]));
2907                // Make sure set has been updated if it has no immutable samplers
2908                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2909                if (!pSet->IsUpdated()) {
2910                    for (auto binding : setBindingPair.second) {
2911                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2912                            result |= log_msg(
2913                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2914                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2915                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2916                                "this will result in undefined behavior.",
2917                                (uint64_t)pSet->GetSet());
2918                        }
2919                    }
2920                }
2921            }
2922        }
2923        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2924        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2925    }
2926
2927    // Check general pipeline state that needs to be validated at drawtime
2928    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2929        result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe);
2930
2931    return result;
2932}
2933
2934// Validate HW line width capabilities prior to setting requested line width.
2935static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2936    bool skip_call = false;
2937
2938    // First check to see if the physical device supports wide lines.
2939    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2940        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2941                             dsError, "DS", "Attempt to set lineWidth to %f but the physical device wideLines feature "
2942                                            "is not supported/enabled, so lineWidth must be 1.0f!",
2943                             lineWidth);
2944    } else {
2945        // Otherwise, make sure the width falls in the valid range.
2946        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
2947            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
2948            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
2949                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but the physical device limits "
2950                                                          "line width to the range [%f, %f]!",
2951                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
2952                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
2953        }
2954    }
2955
2956    return skip_call;
2957}
2958
2959// Verify that create state for a pipeline is valid
2960static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> const &pPipelines,
2961                                      int pipelineIndex) {
2962    bool skipCall = false;
2963
2964    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
2965
2966    // If create derivative bit is set, check that we've specified a base
2967    // pipeline correctly, and that the base pipeline was created to allow
2968    // derivatives.
2969    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
2970        PIPELINE_NODE *pBasePipeline = nullptr;
2971        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
2972              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
2973            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2974                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2975                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
2976        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
2977            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
2978                skipCall |=
2979                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2980                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2981                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
2982            } else {
2983                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
2984            }
2985        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
2986            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
2987        }
2988
2989        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
2990            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2991                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2992                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
2993        }
2994    }
2995
2996    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
2997        if (!my_data->phys_dev_properties.features.independentBlend) {
2998            if (pPipeline->attachments.size() > 1) {
2999                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3000                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3001                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3002                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3003                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3004                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3005                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3006                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3007                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3008                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3009                        skipCall |=
3010                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3011                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3012                            "enabled, all elements of pAttachments must be identical");
3013                    }
3014                }
3015            }
3016        }
3017        if (!my_data->phys_dev_properties.features.logicOp &&
3018            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3019            skipCall |=
3020                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3021                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3022                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3023        }
3024        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3025            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3026             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3027            skipCall |=
3028                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3029                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3030                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3031        }
3032    }
3033
3034    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3035    // produces nonsense errors that confuse users. Other layers should already
3036    // emit errors for renderpass being invalid.
3037    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3038    if (renderPass &&
3039        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3040        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3041                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3042                                                                           "is out of range for this renderpass (0..%u)",
3043                            pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3044    }
3045
3046    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3047                                                    my_data->shaderModuleMap)) {
3048        skipCall = true;
3049    }
3050    // Each shader's stage must be unique
3051    if (pPipeline->duplicate_shaders) {
3052        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3053            if (pPipeline->duplicate_shaders & stage) {
3054                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3055                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3056                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3057                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3058            }
3059        }
3060    }
3061    // VS is required
3062    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3063        skipCall |=
3064            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3065                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3066    }
3067    // Either both or neither TC/TE shaders should be defined
3068    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3069        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3070        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3071                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3072                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3073    }
3074    // Compute shaders should be specified independent of Gfx shaders
3075    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3076        (pPipeline->active_shaders &
3077         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3078          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3079        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3080                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3081                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3082    }
3083    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3084    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3085    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3086        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3087         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3088        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3089                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3090                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3091                                                                           "topology for tessellation pipelines");
3092    }
3093    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3094        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3095        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3096            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3097                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3098                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3099                                                                               "topology is only valid for tessellation pipelines");
3100        }
3101        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3102            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3103                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3104                                "Invalid Pipeline CreateInfo State: "
3105                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3106                                "topology used. pTessellationState must not be NULL in this case.");
3107        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3108                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
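            // NOTE: 32 is hardcoded here; the spec-defined bound is the device limit
            // VkPhysicalDeviceLimits::maxTessellationPatchSize (required minimum 32).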
3109            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3110                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3111                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3112                                                                               "topology used with patchControlPoints value %u."
3113                                                                               " patchControlPoints should be >0 and <=32.",
3114                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3115        }
3116    }
3117    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3118    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3119        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3120            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3121                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3122        }
3123    }
3124    // Viewport state must be included if rasterization is enabled.
3125    // If the viewport state is included, the viewport and scissor counts should always match.
3126    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3127    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3128        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3129        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3130            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3131                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3132                                                                           "and scissors are dynamic PSO must include "
3133                                                                           "viewportCount and scissorCount in pViewportState.");
3134        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3135                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3136            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3137                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3138                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3139                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3140                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3141        } else {
3142            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3143            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3144            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3145            if (!dynViewport) {
3146                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3147                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3148                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3149                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3150                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3151                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3152                                        "vkCmdSetViewport().",
3153                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3154                }
3155            }
3156            if (!dynScissor) {
3157                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3158                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3159                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3160                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3161                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3162                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3163                                        "vkCmdSetScissor().",
3164                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3165                }
3166            }
3167        }
3168    }
3169    return skipCall;
3170}
3171
3172// Free the Pipeline nodes
3173static void deletePipelines(layer_data *my_data) {
3174    if (my_data->pipelineMap.empty())
3175        return;
3176    for (auto &pipe_map_pair : my_data->pipelineMap) {
3177        delete pipe_map_pair.second;
3178    }
3179    my_data->pipelineMap.clear();
3180}
3181
3182// Block of code at start here specifically for managing/tracking DSs
3183
3184// Return Pool node ptr for specified pool or else NULL
3185DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3186    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3187    if (pool_it == dev_data->descriptorPoolMap.end()) {
3188        return NULL;
3189    }
3190    return pool_it->second;
3191}
3192
3193// Return false if update struct is of valid type, otherwise flag error and return code from callback
3194static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3195    switch (pUpdateStruct->sType) {
3196    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3197    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3198        return false;
3199    default:
3200        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3201                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3202                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3203                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3204    }
3205}
3206
3207// Return the descriptor count for the given update struct
3208static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3209    switch (pUpdateStruct->sType) {
3210    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3211        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3212    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3213        // TODO : Need to understand this case better and make sure code is correct
3214        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3215    default:
3216        return 0;
3217    }
3218}
3219
3220// For given layout and update, return the first overall index of the layout that is updated
3221static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3222                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3223    return binding_start_index + arrayIndex;
3224}
3225// For given layout and update, return the last overall index of the layout that is updated
3226static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3227                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3228    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3229    return binding_start_index + arrayIndex + count - 1;
3230}
3231// Verify that the descriptor type in the update struct matches what's expected by the layout
3232static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3233                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3234    // First get actual type of update
3235    bool skipCall = false;
3236    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3237    switch (pUpdateStruct->sType) {
3238    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3239        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3240        break;
3241    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3242        /* no need to validate */
3243        return false;
3244        break;
3245    default:
3246        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3247                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3248                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3249                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3250    }
3251    if (!skipCall) {
3252        if (layout_type != actualType) {
3253            skipCall |= log_msg(
3254                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3255                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3256                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3257                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3258        }
3259    }
3260    return skipCall;
3261}
3262//TODO: Consolidate functions
3263bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3264    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3265    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3266        return false;
3267    }
3268    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3269    imgpair.subresource.aspectMask = aspectMask;
3270    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3271    if (imgsubIt == pCB->imageLayoutMap.end()) {
3272        return false;
3273    }
3274    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3275        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3276                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3277                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3278                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3279    }
3280    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3281        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3282                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3283                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3284                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3285    }
3286    node = imgsubIt->second;
3287    return true;
3288}
3289
3290bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3291    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3292        return false;
3293    }
3294    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3295    imgpair.subresource.aspectMask = aspectMask;
3296    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3297    if (imgsubIt == my_data->imageLayoutMap.end()) {
3298        return false;
3299    }
3300    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3301        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3302                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3303                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3304                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3305    }
3306    layout = imgsubIt->second.layout;
3307    return true;
3308}
3309
3310// find layout(s) on the cmd buf level
3311bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3312    ImageSubresourcePair imgpair = {image, true, range};
3313    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3314    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3315    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3316    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3317    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3318    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3319        imgpair = {image, false, VkImageSubresource()};
3320        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3321        if (imgsubIt == pCB->imageLayoutMap.end())
3322            return false;
3323        node = imgsubIt->second;
3324    }
3325    return true;
3326}
3327
3328// find layout(s) on the global level
3329bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3330    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3331    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3332    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3333    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3334    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3335    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3336        imgpair = {imgpair.image, false, VkImageSubresource()};
3337        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3338        if (imgsubIt == my_data->imageLayoutMap.end())
3339            return false;
3340        layout = imgsubIt->second.layout;
3341    }
3342    return true;
3343}
3344
3345bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3346    ImageSubresourcePair imgpair = {image, true, range};
3347    return FindLayout(my_data, imgpair, layout);
3348}
3349
3350bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3351    auto sub_data = my_data->imageSubresourceMap.find(image);
3352    if (sub_data == my_data->imageSubresourceMap.end())
3353        return false;
3354    auto img_node = getImageNode(my_data, image);
3355    if (!img_node)
3356        return false;
3357    bool ignoreGlobal = false;
3358    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3359    // potential errors in this case.
3360    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3361        ignoreGlobal = true;
3362    }
3363    for (auto imgsubpair : sub_data->second) {
3364        if (ignoreGlobal && !imgsubpair.hasSubresource)
3365            continue;
3366        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3367        if (img_data != my_data->imageLayoutMap.end()) {
3368            layouts.push_back(img_data->second.layout);
3369        }
3370    }
3371    return true;
3372}
3373
3374// Set the layout on the global level
3375void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3376    VkImage &image = imgpair.image;
3377    // TODO (mlentine): Maybe set format if new? Not used atm.
3378    my_data->imageLayoutMap[imgpair].layout = layout;
3379    // TODO (mlentine): Maybe make vector a set?
3380    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3381    if (subresource == my_data->imageSubresourceMap[image].end()) {
3382        my_data->imageSubresourceMap[image].push_back(imgpair);
3383    }
3384}
3385
3386// Set the layout on the cmdbuf level
3387void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3388    pCB->imageLayoutMap[imgpair] = node;
3389    // TODO (mlentine): Maybe make vector a set?
3390    auto subresource =
3391        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3392    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3393        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3394    }
3395}
3396
3397void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3398    // TODO (mlentine): Maybe make vector a set?
3399    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3400        pCB->imageSubresourceMap[imgpair.image].end()) {
3401        pCB->imageLayoutMap[imgpair].layout = layout;
3402    } else {
3403        // TODO (mlentine): Could be expensive and might need to be removed.
3404        assert(imgpair.hasSubresource);
3405        IMAGE_CMD_BUF_LAYOUT_NODE node;
3406        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3407            node.initialLayout = layout;
3408        }
3409        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3410    }
3411}
3412
3413template <class OBJECT, class LAYOUT>
3414void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3415    if (imgpair.subresource.aspectMask & aspectMask) {
3416        imgpair.subresource.aspectMask = aspectMask;
3417        SetLayout(pObject, imgpair, layout);
3418    }
3419}
3420
3421template <class OBJECT, class LAYOUT>
3422void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3423    ImageSubresourcePair imgpair = {image, true, range};
3424    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3425    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3426    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3427    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3428}
3429
3430template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3431    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3432    SetLayout(pObject, imgpair, layout); // pass the pair directly; no SetLayout overload takes (pObject, image, imgpair, layout)
3433}
3434
3435void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3436    auto iv_data = getImageViewData(dev_data, imageView);
3437    assert(iv_data);
3438    const VkImage &image = iv_data->image;
3439    const VkImageSubresourceRange &subRange = iv_data->subresourceRange;
3440    // TODO: Do not iterate over every possibility - consolidate where possible
3441    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3442        uint32_t level = subRange.baseMipLevel + j;
3443        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3444            uint32_t layer = subRange.baseArrayLayer + k;
3445            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3446            SetLayout(pCB, image, sub, layout);
3447        }
3448    }
3449}
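// Illustrative sketch (hypothetical application code): the per-subresource
// tracking above is what lets the layer follow a transition the application
// records with vkCmdPipelineBarrier, e.g. UNDEFINED -> TRANSFER_DST_OPTIMAL for
// the first mip level of a color image:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &barrier);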
3450
3451// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3452// func_str is the name of the calling function
3453// Return false if no errors occur
3454// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3455static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, const std::string &func_str) {
3456    bool skip_call = false;
3457    auto set_node = my_data->setMap.find(set);
3458    if (set_node == my_data->setMap.end()) {
3459        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3460                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3461                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3462                             (uint64_t)(set));
3463    } else {
3464        if (set_node->second->in_use.load()) {
3465            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3466                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3467                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3468                                 func_str.c_str(), (uint64_t)(set));
3469        }
3470    }
3471    return skip_call;
3472}
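// Illustrative misuse (hypothetical application code): freeing a set before the
// submission that uses it has completed trips the DRAWSTATE_OBJECT_INUSE path above.
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);   // submitted cmd buffer binds 'set'
//     vkFreeDescriptorSets(device, pool, 1, &set);    // error: set is still in use
//     // Correct order: vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX) first.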
3473
3474// Remove set from setMap and delete the set
3475static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3476    dev_data->setMap.erase(descriptor_set->GetSet());
3477    delete descriptor_set;
3478}
3479// Free all DS Pools including their Sets & related sub-structs
3480// NOTE : Calls to this function should be wrapped in mutex
3481static void deletePools(layer_data *my_data) {
3482    if (my_data->descriptorPoolMap.empty())
3483        return;
3484    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3485        // Remove this pool's sets from setMap and delete them
3486        for (auto ds : (*ii).second->sets) {
3487            freeDescriptorSet(my_data, ds);
3488        }
3489        (*ii).second->sets.clear();
3490    }
3491    my_data->descriptorPoolMap.clear();
3492}
3493
3494static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3495                                VkDescriptorPoolResetFlags flags) {
3496    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3497    if (!pPool) {
3498        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3499                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
3500                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
3501    } else {
3502        // TODO: validate flags
3503        // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3504        for (auto ds : pPool->sets) {
3505            freeDescriptorSet(my_data, ds);
3506        }
3507        pPool->sets.clear();
3508        // Reset available count for each type and available sets for this pool
3509        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3510            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3511        }
3512        pPool->availableSets = pPool->maxSets;
3513    }
3514}
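// Illustrative usage (hypothetical application code): vkResetDescriptorPool
// implicitly frees every set in the pool, which is why all sets are dropped from
// setMap above before the per-type counters are restored.
//
//     vkResetDescriptorPool(device, pool, 0 /* flags: reserved, must be 0 */);
//     // All VkDescriptorSet handles allocated from 'pool' are now invalid.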
3515
3516// For given CB object, fetch associated CB Node from map
3517static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3518    auto it = my_data->commandBufferMap.find(cb);
3519    if (it == my_data->commandBufferMap.end()) {
3520        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3521                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3522                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3523        return NULL;
3524    }
3525    return it->second;
3526}
3527// Free all CB Nodes
3528// NOTE : Calls to this function should be wrapped in mutex
3529static void deleteCommandBuffers(layer_data *my_data) {
3530    if (my_data->commandBufferMap.empty()) {
3531        return;
3532    }
3533    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3534        delete (*ii).second;
3535    }
3536    my_data->commandBufferMap.clear();
3537}
3538
3539static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3540    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3541                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3542                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3543}
3544
3545bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3546    if (!pCB->activeRenderPass)
3547        return false;
3548    bool skip_call = false;
3549    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3550        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3551        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3552                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3553                             "Only vkCmdExecuteCommands(), vkCmdNextSubpass(), and vkCmdEndRenderPass() may be "
                             "recorded in a subpass using secondary command buffers.");
3554    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3555        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3556                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3557                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3558    }
3559    return skip_call;
3560}
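// Illustrative sketch (hypothetical application code): once a subpass is begun
// with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, inline commands are
// rejected by the check above; only secondary-buffer execution is allowed.
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdDraw(cb, 3, 1, 0, 0);                  // error: inline cmd in secondary-only subpass
//     vkCmdExecuteCommands(cb, 1, &secondary_cb); // OK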
3561
3562static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3563    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3564        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3565                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3566                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3567    return false;
3568}
3569
3570static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3571    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3572        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3573                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3574                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3575    return false;
3576}
3577
3578static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3579    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3580        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3581                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3582                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.",
                       name);
3583    return false;
3584}
3585
3586// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3587//  in the recording state or if there's an issue with the Cmd ordering
3588static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3589    bool skipCall = false;
3590    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
3591    if (pool_data != my_data->commandPoolMap.end()) {
3592        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
3593        switch (cmd) {
3594        case CMD_BINDPIPELINE:
3595        case CMD_BINDPIPELINEDELTA:
3596        case CMD_BINDDESCRIPTORSETS:
3597        case CMD_FILLBUFFER:
3598        case CMD_CLEARCOLORIMAGE:
3599        case CMD_SETEVENT:
3600        case CMD_RESETEVENT:
3601        case CMD_WAITEVENTS:
3602        case CMD_BEGINQUERY:
3603        case CMD_ENDQUERY:
3604        case CMD_RESETQUERYPOOL:
3605        case CMD_COPYQUERYPOOLRESULTS:
3606        case CMD_WRITETIMESTAMP:
3607            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3608            break;
3609        case CMD_SETVIEWPORTSTATE:
3610        case CMD_SETSCISSORSTATE:
3611        case CMD_SETLINEWIDTHSTATE:
3612        case CMD_SETDEPTHBIASSTATE:
3613        case CMD_SETBLENDSTATE:
3614        case CMD_SETDEPTHBOUNDSSTATE:
3615        case CMD_SETSTENCILREADMASKSTATE:
3616        case CMD_SETSTENCILWRITEMASKSTATE:
3617        case CMD_SETSTENCILREFERENCESTATE:
3618        case CMD_BINDINDEXBUFFER:
3619        case CMD_BINDVERTEXBUFFER:
3620        case CMD_DRAW:
3621        case CMD_DRAWINDEXED:
3622        case CMD_DRAWINDIRECT:
3623        case CMD_DRAWINDEXEDINDIRECT:
3624        case CMD_BLITIMAGE:
3625        case CMD_CLEARATTACHMENTS:
3626        case CMD_CLEARDEPTHSTENCILIMAGE:
3627        case CMD_RESOLVEIMAGE:
3628        case CMD_BEGINRENDERPASS:
3629        case CMD_NEXTSUBPASS:
3630        case CMD_ENDRENDERPASS:
3631            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3632            break;
3633        case CMD_DISPATCH:
3634        case CMD_DISPATCHINDIRECT:
3635            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3636            break;
3637        case CMD_COPYBUFFER:
3638        case CMD_COPYIMAGE:
3639        case CMD_COPYBUFFERTOIMAGE:
3640        case CMD_COPYIMAGETOBUFFER:
3641        case CMD_CLONEIMAGEDATA:
3642        case CMD_UPDATEBUFFER:
3643        case CMD_PIPELINEBARRIER:
3644        case CMD_EXECUTECOMMANDS:
3645        case CMD_END:
3646            break;
3647        default:
3648            break;
3649        }
3650    }
3651    if (pCB->state != CB_RECORDING) {
3652        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3653    } else {
3654        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3655        CMD_NODE cmdNode = {};
3656        // Initialize cmd node and append it to the end of the CB's command list
3657        cmdNode.cmdNumber = ++pCB->numCmds;
3658        cmdNode.type = cmd;
3659        pCB->cmds.push_back(cmdNode);
3660    }
3661    return skipCall;
3662}
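// Illustrative misuse (hypothetical application code): recording a compute
// dispatch into a command buffer whose pool was created for a graphics-only
// queue family is rejected by the checkComputeBit() path in addCmd() above.
//
//     // 'cb' allocated from a pool whose queueFamilyIndex lacks VK_QUEUE_COMPUTE_BIT
//     vkCmdDispatch(cb, 64, 1, 1); // error: pool's queue family has no compute capability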
3663// Reset the command buffer state
3664//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3665static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3666    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3667    if (pCB) {
3668        pCB->in_use.store(0);
3669        pCB->cmds.clear();
3670        // Reset CB state (note that createInfo is not cleared)
3671        pCB->commandBuffer = cb;
3672        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3673        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3674        pCB->numCmds = 0;
3675        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3676        pCB->state = CB_NEW;
3677        pCB->submitCount = 0;
3678        pCB->status = 0;
3679        pCB->viewports.clear();
3680        pCB->scissors.clear();
3681
3682        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3683            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
3684            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
3685                set->RemoveBoundCommandBuffer(pCB);
3686            }
3687            pCB->lastBound[i].reset();
3688        }
3689
3690        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3691        pCB->activeRenderPass = nullptr;
3692        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3693        pCB->activeSubpass = 0;
3694        pCB->destroyedSets.clear();
3695        pCB->updatedSets.clear();
3696        pCB->destroyedFramebuffers.clear();
3697        pCB->waitedEvents.clear();
3698        pCB->events.clear();
3699        pCB->waitedEventsBeforeQueryReset.clear();
3700        pCB->queryToStateMap.clear();
3701        pCB->activeQueries.clear();
3702        pCB->startedQueries.clear();
3703        pCB->imageSubresourceMap.clear();
3704        pCB->imageLayoutMap.clear();
3705        pCB->eventToStageMap.clear();
3706        pCB->drawData.clear();
3707        pCB->currentDrawData.buffers.clear();
3708        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3709        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3710        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3711            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3712        }
3713        pCB->secondaryCommandBuffers.clear();
3714        pCB->updateImages.clear();
3715        pCB->updateBuffers.clear();
3716        clear_cmd_buf_and_mem_references(dev_data, pCB);
3717        pCB->eventUpdates.clear();
3718        pCB->queryUpdates.clear();
3719
3720        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3721        for (auto framebuffer : pCB->framebuffers) {
3722            auto fbNode = getFramebuffer(dev_data, framebuffer);
3723            if (fbNode)
3724                fbNode->referencingCmdBuffers.erase(pCB->commandBuffer);
3725        }
3726        pCB->framebuffers.clear();
3727        pCB->activeFramebuffer = VK_NULL_HANDLE;
3728    }
3729}
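// Note: this reset runs both for an explicit vkResetCommandBuffer() and for the
// implicit reset performed when re-beginning a previously recorded buffer.
// Illustrative (hypothetical application code):
//
//     vkResetCommandBuffer(cb, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
//     // or, if the pool was created with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT:
//     vkBeginCommandBuffer(cb, &begin_info); // implicitly resets before recording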
3730
3731// Set PSO-related status bits for CB, including dynamic state set via PSO
3732static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
3733    // Account for any dynamic state not set via this PSO
3734    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3735        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3736        pCB->status = CBSTATUS_ALL;
3737    } else {
3738        // First consider all state on
3739        // Then unset any state that's noted as dynamic in PSO
3740        // Finally OR that into CB statemask
3741        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
3742        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3743            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3744            case VK_DYNAMIC_STATE_VIEWPORT:
3745                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
3746                break;
3747            case VK_DYNAMIC_STATE_SCISSOR:
3748                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
3749                break;
3750            case VK_DYNAMIC_STATE_LINE_WIDTH:
3751                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3752                break;
3753            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3754                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3755                break;
3756            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3757                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3758                break;
3759            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3760                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3761                break;
3762            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3763                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3764                break;
3765            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3766                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3767                break;
3768            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3769                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3770                break;
3771            default:
3772                // TODO : Flag error here
3773                break;
3774            }
3775        }
3776        pCB->status |= psoDynStateMask;
3777    }
3778}
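// Illustrative sketch (hypothetical application code): a pipeline that lists
// VK_DYNAMIC_STATE_VIEWPORT leaves CBSTATUS_VIEWPORT_SET clear above, so the
// draw-time checks demand an explicit vkCmdSetViewport before any draw:
//
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
//     VkViewport vp = {0.0f, 0.0f, 1280.0f, 720.0f, 0.0f, 1.0f};
//     vkCmdSetViewport(cb, 0, 1, &vp); // required because viewport is dynamic
//     vkCmdDraw(cb, 3, 1, 0, 0);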
3779
3780// Print the last bound Gfx Pipeline
3781static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
3782    bool skipCall = false;
3783    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3784    if (pCB) {
3785        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
3786        if (!pPipeTrav) {
3787            // nothing to print
3788        } else {
3789            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3790                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
3791                                vk_print_vkgraphicspipelinecreateinfo(
3792                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
3793                                    .c_str());
3794        }
3795    }
3796    return skipCall;
3797}
3798
3799static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
3800    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3801    if (pCB && !pCB->cmds.empty()) {
3802        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3803                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
3804        vector<CMD_NODE> cmds = pCB->cmds;
3805        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
3806            // TODO : Need to pass cb as srcObj here
3807            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
3808                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
3809        }
3810    } else {
3811        // Nothing to print
3812    }
3813}
3814
3815static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
3816    bool skipCall = false;
3817    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
3818        return skipCall;
3819    }
3820    skipCall |= printPipeline(my_data, cb);
3821    return skipCall;
3822}
3823
3824// Flags validation error if the associated call is made inside a render pass. The apiName
3825// routine should ONLY be called outside a render pass.
3826static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3827    bool inside = false;
3828    if (pCB->activeRenderPass) {
3829        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3830                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
3831                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
3832                         (uint64_t)pCB->activeRenderPass->renderPass);
3833    }
3834    return inside;
3835}
3836
3837// Flags validation error if the associated call is made outside a render pass. The apiName
3838// routine should ONLY be called inside a render pass.
3839static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3840    bool outside = false;
3841    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3842        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3843         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3844        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3845                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
3846                          "%s: This call must be issued inside an active render pass.", apiName);
3847    }
3848    return outside;
3849}
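// Illustrative misuse (hypothetical application code): transfer commands are
// outside-render-pass only, while draws are inside-only; the two helpers above
// flag both directions.
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdCopyBuffer(cb, src, dst, 1, &region); // error: flagged by insideRenderPass()
//     vkCmdEndRenderPass(cb);
//     vkCmdDraw(cb, 3, 1, 0, 0);                 // error: flagged by outsideRenderPass()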
3850
3851static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3852
3853    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3854
3855}
3856
3857VKAPI_ATTR VkResult VKAPI_CALL
3858CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
3859    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3860
3861    assert(chain_info->u.pLayerInfo);
3862    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3863    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3864    if (fpCreateInstance == NULL)
3865        return VK_ERROR_INITIALIZATION_FAILED;
3866
3867    // Advance the link info for the next element on the chain
3868    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3869
3870    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3871    if (result != VK_SUCCESS)
3872        return result;
3873
3874    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
3875    instance_data->instance = *pInstance;
3876    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
3877    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
3878
3879    instance_data->report_data =
3880        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
3881                                     pCreateInfo->ppEnabledExtensionNames);
3882
3883    init_core_validation(instance_data, pAllocator);
3884
3885    ValidateLayerOrdering(*pCreateInfo);
3886
3887    return result;
3888}
3889
3890/* hook DestroyInstance to remove tableInstanceMap entry */
3891VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3892    // TODOSC : Shouldn't need any customization here
3893    dispatch_key key = get_dispatch_key(instance);
3894    // TBD: Need any locking this early, in case this function is called at the
3895    // same time by more than one thread?
3896    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
3897    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
3898    pTable->DestroyInstance(instance, pAllocator);
3899
3900    std::lock_guard<std::mutex> lock(global_lock);
3901    // Clean up logging callback, if any
3902    while (!my_data->logging_callback.empty()) {
3903        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
3904        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
3905        my_data->logging_callback.pop_back();
3906    }
3907
3908    layer_debug_report_destroy_instance(my_data->report_data);
3909    delete my_data->instance_dispatch_table;
3910    layer_data_map.erase(key);
3911}
3912
3913static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3914    uint32_t i;
3915    // TBD: Need any locking, in case this function is called at the same time
3916    // by more than one thread?
3917    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3918    dev_data->device_extensions.wsi_enabled = false;
3919
3920    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
3921    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
3922    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
3923    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
3924    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
3925    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
3926    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
3927
3928    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3929        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
3930            dev_data->device_extensions.wsi_enabled = true;
3931    }
3932}
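// Illustrative sketch (hypothetical application code, queue create infos elided):
// wsi_enabled above becomes true when the application requests the swapchain
// extension at device creation:
//
//     const char *dev_exts[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     VkDeviceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     ci.enabledExtensionCount = 1;
//     ci.ppEnabledExtensionNames = dev_exts;
//     vkCreateDevice(gpu, &ci, nullptr, &device);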
3933
3934VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3935                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3936    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
3937    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3938
3939    assert(chain_info->u.pLayerInfo);
3940    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3941    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3942    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
3943    if (fpCreateDevice == NULL) {
3944        return VK_ERROR_INITIALIZATION_FAILED;
3945    }
3946
3947    // Advance the link info for the next element on the chain
3948    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3949
3950    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3951    if (result != VK_SUCCESS) {
3952        return result;
3953    }
3954
3955    std::unique_lock<std::mutex> lock(global_lock);
3956    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
3957
3958    // Setup device dispatch table
3959    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
3960    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
3961    my_device_data->device = *pDevice;
3962
3963    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
3964    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
3965    // Get physical device limits for this device
3966    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
3967    uint32_t count;
3968    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
3969    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
3970    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
3971        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
3972    // TODO: device limits should make sure these are compatible
3973    if (pCreateInfo->pEnabledFeatures) {
3974        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
3975    } else {
3976        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
3977    }
3978    // Store physical device mem limits into device layer_data struct
3979    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
3980    lock.unlock();
3981
3982    ValidateLayerOrdering(*pCreateInfo);
3983
3984    return result;
3985}
3986
3987// Forward declaration
3988static void deleteRenderPasses(layer_data *);
3989VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
3990    // TODOSC : Shouldn't need any customization here
3991    dispatch_key key = get_dispatch_key(device);
3992    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
3993    // Free all the memory
3994    std::unique_lock<std::mutex> lock(global_lock);
3995    deletePipelines(dev_data);
3996    deleteRenderPasses(dev_data);
3997    deleteCommandBuffers(dev_data);
3998    // This will also delete all sets in the pool & remove them from setMap
3999    deletePools(dev_data);
4000    // All sets should be removed
4001    assert(dev_data->setMap.empty());
4002    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4003        delete del_layout.second;
4004    }
4005    dev_data->descriptorSetLayoutMap.clear();
4006    dev_data->imageViewMap.clear();
4007    dev_data->imageMap.clear();
4008    dev_data->imageSubresourceMap.clear();
4009    dev_data->imageLayoutMap.clear();
4010    dev_data->bufferViewMap.clear();
4011    dev_data->bufferMap.clear();
4012    // Queues persist until device is destroyed
4013    dev_data->queueMap.clear();
4014    lock.unlock();
4015#if MTMERGESOURCE
4016    bool skipCall = false;
4017    lock.lock();
4018    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4019            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4020    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4021            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4022    print_mem_list(dev_data);
4023    printCBList(dev_data);
4024    // Report any memory leaks
4025    DEVICE_MEM_INFO *pInfo = NULL;
4026    if (!dev_data->memObjMap.empty()) {
4027        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4028            pInfo = (*ii).second.get();
4029            if (pInfo->allocInfo.allocationSize != 0) {
4030                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4031                skipCall |=
4032                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4033                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4034                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4035                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4036                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4037            }
4038        }
4039    }
4040    layer_debug_report_destroy_device(device);
4041    lock.unlock();
4042
4043#if DISPATCH_MAP_DEBUG
4044    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4045#endif
4046    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4047    if (!skipCall) {
4048        pDisp->DestroyDevice(device, pAllocator);
4049    }
4050#else
4051    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4052#endif
4053    delete dev_data->device_dispatch_table;
4054    layer_data_map.erase(key);
4055}
4056
4057static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4058
4059// Validate that each image's initial layout recorded in the command buffer
4060// matches that image's current global layout at submit time, then update the
4061// global layout tracking to the command buffer's final layout
4062static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4063    bool skip_call = false;
4064    for (auto cb_image_data : pCB->imageLayoutMap) {
4065        VkImageLayout imageLayout;
4066        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4067            skip_call |=
4068                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4069                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4070                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image));
4071        } else {
4072            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4073                // TODO: Set memory invalid which is in mem_tracker currently
4074            } else if (imageLayout != cb_image_data.second.initialLayout) {
4075                if (cb_image_data.first.hasSubresource) {
4076                    skip_call |= log_msg(
4077                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4078                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4079                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4080                        "with layout %s when first use is %s.",
4081                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4082                                cb_image_data.first.subresource.arrayLayer,
4083                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4084                        string_VkImageLayout(cb_image_data.second.initialLayout));
4085                } else {
4086                    skip_call |= log_msg(
4087                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4088                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4089                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4090                        "first use is %s.",
4091                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4092                        string_VkImageLayout(cb_image_data.second.initialLayout));
4093                }
4094            }
4095            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4096        }
4097    }
4098    return skip_call;
4099}
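// Illustrative misuse (hypothetical application code): recording a command buffer
// whose first use of an image expects SHADER_READ_ONLY_OPTIMAL, then submitting
// while the image is still globally UNDEFINED, is what the check above reports.
//
//     // 'cb' was recorded sampling 'image' with first-use layout SHADER_READ_ONLY_OPTIMAL
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
//     // error unless an earlier submission already transitioned 'image' out of UNDEFINED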
4100
4101// Track which resources are in-flight by atomically incrementing their "in_use" count
4102static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
4103    bool skip_call = false;
4104    for (auto drawDataElement : pCB->drawData) {
4105        for (auto buffer : drawDataElement.buffers) {
4106            auto buffer_node = getBufferNode(my_data, buffer);
4107            if (!buffer_node) {
4108                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4109                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4110                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4111            } else {
4112                buffer_node->in_use.fetch_add(1);
4113            }
4114        }
4115    }
4116    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4117        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4118            if (!my_data->setMap.count(set->GetSet())) {
4119                skip_call |=
4120                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4121                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4122                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
4123            } else {
4124                set->in_use.fetch_add(1);
4125            }
4126        }
4127    }
4128    for (auto semaphore : semaphores) {
4129        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4130        if (semaphoreNode == my_data->semaphoreMap.end()) {
4131            skip_call |=
4132                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4133                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4134                        "Cannot submit cmd buffer using deleted semaphore 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(semaphore));
4135        } else {
4136            semaphoreNode->second.in_use.fetch_add(1);
4137        }
4138    }
4139    for (auto event : pCB->events) {
4140        auto eventNode = my_data->eventMap.find(event);
4141        if (eventNode == my_data->eventMap.end()) {
4142            skip_call |=
4143                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4144                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4145                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4146        } else {
4147            eventNode->second.in_use.fetch_add(1);
4148        }
4149    }
4150    for (auto event : pCB->writeEventsBeforeWait) {
4151        auto eventNode = my_data->eventMap.find(event);
4152        if (eventNode != my_data->eventMap.end()) eventNode->second.write_in_use++; // guard against deleted events
4153    }
4154    return skip_call;
4155}
4156
4157// Note: This function assumes that the global lock is held by the calling
4158// thread.
4159static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4160    bool skip_call = false;
4161    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4162    if (pCB) {
4163        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4164            for (auto event : queryEventsPair.second) {
4165                if (my_data->eventMap[event].needsSignaled) {
4166                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4167                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4168                                         "Cannot get query results on queryPool 0x%" PRIx64
4169                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4170                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4171                }
4172            }
4173        }
4174    }
4175    return skip_call;
4176}
4177// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4178static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4179    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4180    // Drop it from the global in-flight set only once no submission still holds a reference to it
4181    pCB->in_use.fetch_sub(1);
4182    if (!pCB->in_use.load()) {
4183        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4184    }
4185}
4186
4187static void decrementResources(layer_data *my_data, CB_SUBMISSION *submission) {
4188    GLOBAL_CB_NODE *pCB = getCBNode(my_data, submission->cb);
4189    for (auto drawDataElement : pCB->drawData) {
4190        for (auto buffer : drawDataElement.buffers) {
4191            auto buffer_node = getBufferNode(my_data, buffer);
4192            if (buffer_node) {
4193                buffer_node->in_use.fetch_sub(1);
4194            }
4195        }
4196    }
4197    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4198        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4199            set->in_use.fetch_sub(1);
4200        }
4201    }
4202    for (auto semaphore : submission->semaphores) {
4203        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4204        if (semaphoreNode != my_data->semaphoreMap.end()) {
4205            semaphoreNode->second.in_use.fetch_sub(1);
4206        }
4207    }
4208    for (auto event : pCB->events) {
4209        auto eventNode = my_data->eventMap.find(event);
4210        if (eventNode != my_data->eventMap.end()) {
4211            eventNode->second.in_use.fetch_sub(1);
4212        }
4213    }
4214    for (auto event : pCB->writeEventsBeforeWait) {
4215        auto eventNode = my_data->eventMap.find(event);
4216        if (eventNode != my_data->eventMap.end()) {
4217            eventNode->second.write_in_use--;
4218        }
4219    }
4220    for (auto queryStatePair : pCB->queryToStateMap) {
4221        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4222    }
4223    for (auto eventStagePair : pCB->eventToStageMap) {
4224        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4225    }
4226}
4227// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4228//  decrementResources for all priorFences and cmdBuffers associated with fence.
4229static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4230    bool skip_call = false;
4231    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4232    for (uint32_t i = 0; i < fenceCount; ++i) {
4233        auto fence_data = my_data->fenceMap.find(pFences[i]);
4234        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4235            return skip_call;
4236        fence_data->second.needsSignaled = false;
4237        if (fence_data->second.in_use.load()) {
4238            fence_pairs.push_back(std::make_pair(fence_data->first, &fence_data->second));
4239            fence_data->second.in_use.fetch_sub(1);
4240        }
4241        decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()),
4242                           fence_data->second.priorFences.data());
4243        for (auto & submission : fence_data->second.submissions) {
4244            decrementResources(my_data, &submission);
4245            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4246            removeInFlightCmdBuffer(my_data, submission.cb);
4247        }
4248        fence_data->second.submissions.clear();
4249        fence_data->second.priorFences.clear();
4250    }
4251    for (auto fence_pair : fence_pairs) {
4252        for (auto queue : fence_pair.second->queues) {
4253            auto queue_pair = my_data->queueMap.find(queue);
4254            if (queue_pair != my_data->queueMap.end()) {
4255                auto last_fence_data =
4256                    std::find(queue_pair->second.lastFences.begin(), queue_pair->second.lastFences.end(), fence_pair.first);
4257                if (last_fence_data != queue_pair->second.lastFences.end())
4258                    queue_pair->second.lastFences.erase(last_fence_data);
4259            }
4260        }
4261        for (auto& fence_data : my_data->fenceMap) {
4262          auto prior_fence_data =
4263              std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first);
4264          if (prior_fence_data != fence_data.second.priorFences.end())
4265              fence_data.second.priorFences.erase(prior_fence_data);
4266        }
4267    }
4268    return skip_call;
4269}
4270// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4271static bool decrementResources(layer_data *my_data, VkQueue queue) {
4272    bool skip_call = false;
4273    auto queue_data = my_data->queueMap.find(queue);
4274    if (queue_data != my_data->queueMap.end()) {
4275        for (auto & submission : queue_data->second.untrackedSubmissions) {
4276            decrementResources(my_data, &submission);
4277            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4278            removeInFlightCmdBuffer(my_data, submission.cb);
4279        }
4280        queue_data->second.untrackedSubmissions.clear();
4281        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4282                                        queue_data->second.lastFences.data());
4283    }
4284    return skip_call;
4285}
4286
4287// This function merges command buffer tracking between queues when there is a semaphore dependency
4288// between them (see below for details as to how tracking works). When this happens, the prior
4289// fences from the signaling queue are merged into the wait queue as well as any untracked command
4290// buffers.
4291static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4292    if (queue == other_queue) {
4293        return;
4294    }
4295    auto queue_data = dev_data->queueMap.find(queue);
4296    auto other_queue_data = dev_data->queueMap.find(other_queue);
4297    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
4298        return;
4299    }
4300    for (auto fenceInner : other_queue_data->second.lastFences) {
4301        queue_data->second.lastFences.push_back(fenceInner);
4302        auto fence_node = dev_data->fenceMap.find(fenceInner);
4303        if (fence_node != dev_data->fenceMap.end()) {
4304            fence_node->second.queues.insert(other_queue_data->first);
4305        }
4306    }
4307    // TODO: Stealing the untracked CBs out of the signaling queue isn't really
4308    // correct. A subsequent submission + wait, or a QWI on that queue, or
4309    // another semaphore dependency to a third queue may /all/ provide
4310    // suitable proof that the work we're stealing here has completed on the
4311    // device, but we've lost that information by moving the tracking between
4312    // queues.
4313    if (fence != VK_NULL_HANDLE) {
4314        auto fence_data = dev_data->fenceMap.find(fence);
4315        if (fence_data == dev_data->fenceMap.end()) {
4316            return;
4317        }
4318        for (auto cmdbuffer : other_queue_data->second.untrackedSubmissions) {
4319            fence_data->second.submissions.push_back(cmdbuffer);
4320        }
4321        other_queue_data->second.untrackedSubmissions.clear();
4322    } else {
4323        for (auto cmdbuffer : other_queue_data->second.untrackedSubmissions) {
4324            queue_data->second.untrackedSubmissions.push_back(cmdbuffer);
4325        }
4326        other_queue_data->second.untrackedSubmissions.clear();
4327    }
4328    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
4329        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
4330    }
4331    for (auto queryStatePair : other_queue_data->second.queryToStateMap) {
4332        queue_data->second.queryToStateMap[queryStatePair.first] = queryStatePair.second;
4333    }
4334}
4335
4336// This is the core function for tracking command buffers. There are two primary ways command
4337// buffers are tracked. When submitted they are stored in the command buffer list associated
4338// with a fence or the untracked command buffer list associated with a queue if no fence is used.
4339// Each queue also stores the last fence that was submitted onto the queue. This allows us to
4340// create a linked list of fences and their associated command buffers so if one fence is
4341// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
// waited on (whether via a queue wait, a device wait, or a fence wait), we free the cmd buffers for
// that fence and recurse over its prior fences.
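//
// Illustrative walk-through (app-side calls; a sketch, not part of the layer),
// assuming two submissions to one queue followed by a wait:
//
//     vkQueueSubmit(q, 1, &submitA, VK_NULL_HANDLE); // cbA -> queue's untrackedSubmissions
//     vkQueueSubmit(q, 1, &submitB, fenceF);         // SubmitFence(): cbA swaps into
//                                                    //   fenceF->submissions, cbB is appended,
//                                                    //   fenceF joins the queue's lastFences
//     vkWaitForFences(dev, 1, &fenceF, VK_TRUE, UINT64_MAX);
//                                                    // decrementResources() retires cbA and cbB,
//                                                    //   then recurses through fenceF's priorFences
//
// (q, dev, submitA/B, cbA/B, and fenceF are hypothetical handles used only for
// this sketch.)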
4344
4345
4346// Submit a fence to a queue, delimiting previous fences and previous untracked
4347// work by it.
4348static void
4349SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence)
4350{
    assert(pFence->priorFences.empty());
    assert(pFence->submissions.empty());
4353
4354    std::swap(pFence->priorFences, pQueue->lastFences);
4355    std::swap(pFence->submissions, pQueue->untrackedSubmissions);
4356
4357    pFence->queues.insert(pQueue->queue);
4358    pFence->needsSignaled = true;
4359    pFence->in_use.fetch_add(1);
4360
4361    pQueue->lastFences.push_back(pFence->fence);
4362}
4363
4364static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4365                                       VkFence fence) {
4366    auto queue_data = my_data->queueMap.find(queue);
4367    if (queue_data != my_data->queueMap.end()) {
4368        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4369            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4370            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4371                // Add cmdBuffers to the global set and increment count
4372                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4374                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
4375                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
4376                    pSubCB->in_use.fetch_add(1);
4377                }
4378                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
4379                pCB->in_use.fetch_add(1);
4380            }
4381        }
4382    }
4383}
4384
4385static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4386    bool skip_call = false;
4387    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4388        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4389        skip_call |=
4390            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4391                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4392                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4393                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4394    }
4395    return skip_call;
4396}
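
// A command buffer can legitimately be re-submitted while a prior submission of
// it is still executing only if it was begun with the simultaneous-use flag.
// Hedged app-side sketch of the begin call that avoids the error above
// (cmd_buf is a hypothetical handle):
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
//     vkBeginCommandBuffer(cmd_buf, &begin_info);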
4397
4398static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4399    bool skipCall = false;
4400    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4401    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4402        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4403                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4404                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4405                            "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4406                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4407    }
4408    // Validate that cmd buffers have been updated
4409    if (CB_RECORDED != pCB->state) {
4410        if (CB_INVALID == pCB->state) {
4411            // Inform app of reason CB invalid
4412            bool causeReported = false;
4413            if (!pCB->destroyedSets.empty()) {
4414                std::stringstream set_string;
4415                for (auto set : pCB->destroyedSets)
4416                    set_string << " " << set;
4417
4418                skipCall |=
4419                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4420                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4421                            "You are submitting command buffer 0x%" PRIxLEAST64
4422                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
4423                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
4424                causeReported = true;
4425            }
4426            if (!pCB->updatedSets.empty()) {
4427                std::stringstream set_string;
4428                for (auto set : pCB->updatedSets)
4429                    set_string << " " << set;
4430
4431                skipCall |=
4432                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4433                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4434                            "You are submitting command buffer 0x%" PRIxLEAST64
4435                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
4436                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
4437                causeReported = true;
4438            }
4439            if (!pCB->destroyedFramebuffers.empty()) {
4440                std::stringstream fb_string;
4441                for (auto fb : pCB->destroyedFramebuffers)
4442                    fb_string << " " << fb;
4443
4444                skipCall |=
4445                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4446                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4447                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because it had the following "
4448                            "referenced framebuffers destroyed: %s",
4449                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
4450                causeReported = true;
4451            }
4452            // TODO : This is defensive programming to make sure an error is
4453            //  flagged if we hit this INVALID cmd buffer case and none of the
4454            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
4456            if (!causeReported) {
4457                skipCall |= log_msg(
4458                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4459                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4460                    "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
4461                    "should "
4462                    "be improved to report the exact cause.",
4463                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
4464            }
4465        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4466            skipCall |=
4467                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4468                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4469                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
4470                        (uint64_t)(pCB->commandBuffer));
4471        }
4472    }
4473    return skipCall;
4474}
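
// Note on the ONE_TIME_SUBMIT check above: an app that intends to replay a
// command buffer each frame should begin it without that flag (adding
// SIMULTANEOUS_USE if submissions may overlap); otherwise it must re-record
// the command buffer before every submit.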
4475
4476static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
4477    // Track in-use for resources off of primary and any secondary CBs
4478    bool skipCall = validateAndIncrementResources(dev_data, pCB, semaphores);
4479    if (!pCB->secondaryCommandBuffers.empty()) {
4480        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4481            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer], semaphores);
4482            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4483            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4484                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4486                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4487                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
4488                        " but that buffer has subsequently been bound to "
4489                        "primary cmd buffer 0x%" PRIxLEAST64
4490                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
4491                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
4492                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4493            }
4494        }
4495    }
4496    skipCall |= validateCommandBufferState(dev_data, pCB);
4497    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4498    // on device
4499    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4500    return skipCall;
4501}
4502
4503static bool
4504ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
4505{
4506    bool skipCall = false;
4507
4508    if (pFence) {
4509        if (pFence->in_use.load()) {
4510            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4511                                (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4512                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4513        }
4514
4515        if (!pFence->needsSignaled) {
4516            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4517                                reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4518                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4519                                reinterpret_cast<uint64_t &>(pFence->fence));
4520        }
4521    }
4522
4523    return skipCall;
4524}
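
// Both complaints above come from reusing a fence without returning it to the
// unsignaled state. A minimal app-side sketch of safe per-frame reuse
// (frame_fence and submit_info are hypothetical names):
//
//     vkWaitForFences(device, 1, &frame_fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &frame_fence);              // back to unsignaled
//     vkQueueSubmit(queue, 1, &submit_info, frame_fence);  // fence is reusable now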
4525
4526
4527VKAPI_ATTR VkResult VKAPI_CALL
4528QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4529    bool skipCall = false;
4530    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4531    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4532    std::unique_lock<std::mutex> lock(global_lock);
4533
4534    auto pQueue = getQueueNode(dev_data, queue);
4535    auto pFence = getFenceNode(dev_data, fence);
4536    skipCall |= ValidateFenceForSubmit(dev_data, pFence);
4537
4538    if (skipCall) {
4539        return VK_ERROR_VALIDATION_FAILED_EXT;
4540    }
4541
4542    // TODO : Review these old print functions and clean up as appropriate
4543    print_mem_list(dev_data);
4544    printCBList(dev_data);
4545
4546    // Mark the fence in-use.
4547    if (pFence) {
4548        SubmitFence(pQueue, pFence);
4549    }
4550
4551    // If a fence is supplied, all the command buffers for this call will be
4552    // delimited by that fence. Otherwise, they go in the untracked portion of
4553    // the queue, and may end up being delimited by a fence supplied in a
4554    // subsequent submission.
4555    auto & submitTarget = pFence ? pFence->submissions : pQueue->untrackedSubmissions;
4556
4557    // Now verify each individual submit
4558    std::unordered_set<VkQueue> processed_other_queues;
4559    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4560        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4561        vector<VkSemaphore> semaphoreList;
4562        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4563            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
4564            semaphoreList.push_back(semaphore);
4565            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
4566                if (dev_data->semaphoreMap[semaphore].signaled) {
4567                    dev_data->semaphoreMap[semaphore].signaled = false;
4568                } else {
4569                    skipCall |=
4570                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4571                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4572                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
4573                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
4574                }
4575                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
4576                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
4577                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
4578                    processed_other_queues.insert(other_queue);
4579                }
4580            }
4581        }
4582        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4583            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
4584            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
4585                semaphoreList.push_back(semaphore);
4586                if (dev_data->semaphoreMap[semaphore].signaled) {
4587                    skipCall |=
4588                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4589                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4590                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
4591                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4592                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
4593                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
4594                } else {
4595                    dev_data->semaphoreMap[semaphore].signaled = true;
4596                    dev_data->semaphoreMap[semaphore].queue = queue;
4597                }
4598            }
4599        }
4600
4601        // TODO: just add one submission per VkSubmitInfo!
4602        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4603            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
4604            skipCall |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
4605            if (pCBNode) {
4607                submitTarget.emplace_back(pCBNode->commandBuffer, semaphoreList);
4608                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
4609                    submitTarget.emplace_back(secondaryCmdBuffer, semaphoreList);
4610                }
4611
4612                pCBNode->submitCount++; // increment submit count
4613                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode, semaphoreList);
4614                // Call submit-time functions to validate/update state
4615                for (auto &function : pCBNode->validate_functions) {
4616                    skipCall |= function();
4617                }
4618                for (auto &function : pCBNode->eventUpdates) {
4619                    skipCall |= function(queue);
4620                }
4621                for (auto &function : pCBNode->queryUpdates) {
4622                    skipCall |= function(queue);
4623                }
4624            }
4625        }
4626    }
4627    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
4628    lock.unlock();
4629    if (!skipCall)
4630        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
4631
4632    return result;
4633}
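
// The semaphore bookkeeping above expects signals and waits to alternate. A
// sketch of a two-queue chain that satisfies both forward-progress checks
// (sem, queueA/queueB, and the submit structs are hypothetical):
//
//     VkSubmitInfo submitA = {};
//     submitA.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     submitA.signalSemaphoreCount = 1;
//     submitA.pSignalSemaphores = &sem;                  // marks sem signaled
//     vkQueueSubmit(queueA, 1, &submitA, VK_NULL_HANDLE);
//
//     VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
//     VkSubmitInfo submitB = {};
//     submitB.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     submitB.waitSemaphoreCount = 1;
//     submitB.pWaitSemaphores = &sem;                    // consumes the signal
//     submitB.pWaitDstStageMask = &wait_stage;
//     vkQueueSubmit(queueB, 1, &submitB, VK_NULL_HANDLE); // also merges tracking across queues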
4634
4635VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4636                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4637    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4638    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4639    // TODO : Track allocations and overall size here
4640    std::lock_guard<std::mutex> lock(global_lock);
4641    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
4642    print_mem_list(my_data);
4643    return result;
4644}
4645
4646VKAPI_ATTR void VKAPI_CALL
4647FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4648    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4649
4650    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
4651    // Before freeing a memory object, an application must ensure the memory object is no longer
4652    // in use by the device—for example by command buffers queued for execution. The memory need
4653    // not yet be unbound from all images and buffers, but any further use of those images or
4654    // buffers (on host or device) for anything other than destroying those objects will result in
4655    // undefined behavior.
4656
4657    std::unique_lock<std::mutex> lock(global_lock);
4658    freeMemObjInfo(my_data, device, mem, false);
4659    print_mem_list(my_data);
4660    printCBList(my_data);
4661    lock.unlock();
4662    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
4663}
4664
4665static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4666    bool skipCall = false;
4667
4668    if (size == 0) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4670                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4671                           "VkMapMemory: Attempting to map memory range of size zero");
4672    }
4673
4674    auto mem_element = my_data->memObjMap.find(mem);
4675    if (mem_element != my_data->memObjMap.end()) {
4676        auto mem_info = mem_element->second.get();
4677        // It is an application error to call VkMapMemory on an object that is already mapped
4678        if (mem_info->memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4680                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4681                               "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4682        }
4683
4684        // Validate that offset + size is within object's allocationSize
4685        if (size == VK_WHOLE_SIZE) {
4686            if (offset >= mem_info->allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4688                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
4689                                   "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
4690                                          " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
4691                                   offset, mem_info->allocInfo.allocationSize, mem_info->allocInfo.allocationSize);
4692            }
4693        } else {
4694            if ((offset + size) > mem_info->allocInfo.allocationSize) {
                skipCall |=
4696                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4697                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4698                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
4699                            size + offset, mem_info->allocInfo.allocationSize);
4700            }
4701        }
4702    }
4703    return skipCall;
4704}
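
// A map request passes the range checks above when it stays inside the
// allocation: offset + size <= allocationSize, or size == VK_WHOLE_SIZE with
// offset < allocationSize. Sketch, assuming a hypothetical 64 KiB allocation:
//
//     void *data = nullptr;
//     vkMapMemory(device, mem, 0, 65536, 0, &data);             // full range: ok
//     vkUnmapMemory(device, mem);
//     vkMapMemory(device, mem, 32768, VK_WHOLE_SIZE, 0, &data); // tail half: ok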
4705
4706static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4707    auto mem_info = getMemObjInfo(my_data, mem);
4708    if (mem_info) {
4709        mem_info->memRange.offset = offset;
4710        mem_info->memRange.size = size;
4711    }
4712}
4713
4714static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
4715    bool skipCall = false;
4716    auto mem_info = getMemObjInfo(my_data, mem);
4717    if (mem_info) {
4718        if (!mem_info->memRange.size) {
4719            // Valid Usage: memory must currently be mapped
4720            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4721                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4722                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
4723        }
4724        mem_info->memRange.size = 0;
4725        if (mem_info->pData) {
4726            free(mem_info->pData);
4727            mem_info->pData = 0;
4728        }
4729    }
4730    return skipCall;
4731}
4732
static const char NoncoherentMemoryFillValue = 0xb;
4734
4735static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
4736    auto mem_info = getMemObjInfo(dev_data, mem);
4737    if (mem_info) {
4738        mem_info->pDriverData = *ppData;
4739        uint32_t index = mem_info->allocInfo.memoryTypeIndex;
4740        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4741            mem_info->pData = 0;
4742        } else {
4743            if (size == VK_WHOLE_SIZE) {
4744                size = mem_info->allocInfo.allocationSize;
4745            }
4746            size_t convSize = (size_t)(size);
4747            mem_info->pData = malloc(2 * convSize);
4748            memset(mem_info->pData, NoncoherentMemoryFillValue, 2 * convSize);
4749            *ppData = static_cast<char *>(mem_info->pData) + (convSize / 2);
4750        }
4751    }
4752}
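
// Layout note for the non-coherent path above: 2 * size bytes are allocated and
// the pointer returned to the app is offset by size / 2, leaving fill-pattern
// guard bands on both sides of the app-visible region:
//
//     [ guard: size/2 ][ app data: size ][ guard: size/2 ]
//
// Any write that strays past either end of the mapping disturbs the
// NoncoherentMemoryFillValue pattern, which later validation can detect.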
4753// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled, and
4755//  it should have been submitted on a queue or during acquire next image
4756static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) {
4757    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4758    bool skipCall = false;
4759    auto pFenceInfo = my_data->fenceMap.find(fence);
4760    if (pFenceInfo != my_data->fenceMap.end()) {
4761        if (!pFenceInfo->second.firstTimeFlag) {
4762            if (!pFenceInfo->second.needsSignaled) {
4763                skipCall |=
4764                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4765                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4766                            "%s specified fence 0x%" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
4767            }
4768            if (pFenceInfo->second.queues.empty() && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
4769                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4770                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4771                                    "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
4772                                    "acquire next image.",
4773                                    apiCall, reinterpret_cast<uint64_t &>(fence));
4774            }
4775        } else {
4776            pFenceInfo->second.firstTimeFlag = false;
4777        }
4778    }
4779    return skipCall;
4780}
4781
4782VKAPI_ATTR VkResult VKAPI_CALL
4783WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
4784    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4785    bool skip_call = false;
4786    // Verify fence status of submitted fences
4787    std::unique_lock<std::mutex> lock(global_lock);
4788    for (uint32_t i = 0; i < fenceCount; i++) {
4789        skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences");
4790    }
4791    lock.unlock();
4792    if (skip_call)
4793        return VK_ERROR_VALIDATION_FAILED_EXT;
4794
4795    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4796
4797    if (result == VK_SUCCESS) {
4798        lock.lock();
4799        // When we know that all fences are complete we can clean/remove their CBs
4800        if (waitAll || fenceCount == 1) {
4801            skip_call |= decrementResources(dev_data, fenceCount, pFences);
4802        }
4803        // NOTE : Alternate case not handled here is when some fences have completed. In
4804        //  this case for app to guarantee which fences completed it will have to call
4805        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
4806        lock.unlock();
4807    }
4808    if (skip_call)
4809        return VK_ERROR_VALIDATION_FAILED_EXT;
4810    return result;
4811}
4812
4813VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
4814    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4815    bool skipCall = false;
4816    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4817    std::unique_lock<std::mutex> lock(global_lock);
4818    skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus");
4819    lock.unlock();
4820
4821    if (skipCall)
4822        return result;
4823
4824    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
4826    lock.lock();
4827    if (result == VK_SUCCESS) {
4828        skipCall |= decrementResources(dev_data, 1, &fence);
4829    }
4830    lock.unlock();
    if (skipCall)
4832        return VK_ERROR_VALIDATION_FAILED_EXT;
4833    return result;
4834}
4835
4836VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
4837                                                            VkQueue *pQueue) {
4838    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4839    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4840    std::lock_guard<std::mutex> lock(global_lock);
4841
4842    // Add queue to tracking set only if it is new
4843    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
4845        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
4846        pQNode->queue = *pQueue;
4847        pQNode->device = device;
4848    }
4849}
4850
4851VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
4852    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4853    bool skip_call = false;
4854    skip_call |= decrementResources(dev_data, queue);
4855    if (skip_call)
4856        return VK_ERROR_VALIDATION_FAILED_EXT;
4857    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
4858    return result;
4859}
4860
4861VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
4862    bool skip_call = false;
4863    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4864    std::unique_lock<std::mutex> lock(global_lock);
4865    for (auto queue : dev_data->queues) {
4866        skip_call |= decrementResources(dev_data, queue);
4867    }
4868    dev_data->globalInFlightCmdBuffers.clear();
4869    lock.unlock();
4870    if (skip_call)
4871        return VK_ERROR_VALIDATION_FAILED_EXT;
4872    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
4873    return result;
4874}
4875
4876VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
4877    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4878    bool skipCall = false;
4879    std::unique_lock<std::mutex> lock(global_lock);
4880    auto fence_pair = dev_data->fenceMap.find(fence);
4881    if (fence_pair != dev_data->fenceMap.end()) {
4882        if (fence_pair->second.in_use.load()) {
4883            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4884                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4885                                "Fence 0x%" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
4886        }
4887        dev_data->fenceMap.erase(fence_pair);
4888    }
4889    lock.unlock();
4890
4891    if (!skipCall)
4892        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
4893}
4894
4895VKAPI_ATTR void VKAPI_CALL
4896DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
4897    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4898    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
4899    std::lock_guard<std::mutex> lock(global_lock);
4900    auto item = dev_data->semaphoreMap.find(semaphore);
4901    if (item != dev_data->semaphoreMap.end()) {
4902        if (item->second.in_use.load()) {
4903            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4904                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4905                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
4906        }
4907        dev_data->semaphoreMap.erase(semaphore);
4908    }
4909    // TODO : Clean up any internal data structures using this obj.
4910}
4911
4912VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4913    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4914    bool skip_call = false;
4915    std::unique_lock<std::mutex> lock(global_lock);
4916    auto event_data = dev_data->eventMap.find(event);
4917    if (event_data != dev_data->eventMap.end()) {
4918        if (event_data->second.in_use.load()) {
4919            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4921                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4922                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
4923        }
4924        dev_data->eventMap.erase(event_data);
4925    }
4926    lock.unlock();
4927    if (!skip_call)
4928        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
4929    // TODO : Clean up any internal data structures using this obj.
4930}
4931
4932VKAPI_ATTR void VKAPI_CALL
4933DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
4934    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
4935        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
4936    // TODO : Clean up any internal data structures using this obj.
4937}
4938
4939VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
4940                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
4941                                                   VkQueryResultFlags flags) {
4942    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4943    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
4944    std::unique_lock<std::mutex> lock(global_lock);
4945    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
4946        auto pCB = getCBNode(dev_data, cmdBuffer);
4947        for (auto queryStatePair : pCB->queryToStateMap) {
4948            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
4949        }
4950    }
4951    bool skip_call = false;
4952    for (uint32_t i = 0; i < queryCount; ++i) {
4953        QueryObject query = {queryPool, firstQuery + i};
4954        auto queryElement = queriesInFlight.find(query);
4955        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                             (uint64_t)(queryPool), firstQuery + i);
                    } else {
                        for (auto event : queryEventElement->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
                // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    make_available |= pCB->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                         (uint64_t)(queryPool), firstQuery + i);
                }
                // Unavailable and not in flight
            } else if (!queryToStateElement->second) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
        } else {
            // Uninitialized: no data has ever been collected for this query
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
5004    }
5005    lock.unlock();
5006    if (skip_call)
5007        return VK_ERROR_VALIDATION_FAILED_EXT;
5008    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5009                                                                flags);
5010}
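
// Passing VK_QUERY_RESULT_WAIT_BIT (or _PARTIAL_BIT) is what lets the
// "unavailable but in flight" case above succeed. Hedged app-side sketch
// (pool and kQueryCount are hypothetical):
//
//     uint64_t results[kQueryCount];
//     vkGetQueryPoolResults(device, pool, 0, kQueryCount, sizeof(results), results,
//                           sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);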
5011
5012static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5013    bool skip_call = false;
5014    auto buffer_node = getBufferNode(my_data, buffer);
5015    if (!buffer_node) {
5016        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5017                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5018                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5019    } else {
5020        if (buffer_node->in_use.load()) {
5021            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5022                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5023                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5024        }
5025    }
5026    return skip_call;
5027}
5028
5029static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5030                                     VkDebugReportObjectTypeEXT object_type) {
5031    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5032        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5033                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
5034                       other_handle);
5035    } else {
5036        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5037                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
5038                       other_handle);
5039    }
5040}
5041
5042static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5043                                  VkDebugReportObjectTypeEXT object_type) {
5044    bool skip_call = false;
5045
5046    for (auto range : ranges) {
5047        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
5048            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5049            continue;
5050        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
5051            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5052            continue;
5053        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5054    }
5055    return skip_call;
5056}
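
// The masking above rounds both endpoints down to a bufferImageGranularity
// boundary and therefore assumes the granularity is a power of two (e.g. with
// granularity 0x400 the mask is ~0x3FF, so offsets 0x1234 and 0x13FF both
// collapse to 0x1000 and are treated as touching the same granule).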
5057
5058static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5059                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
5060    MEMORY_RANGE range;
5061    range.handle = handle;
5062    range.memory = mem;
5063    range.start = memoryOffset;
5064    range.end = memoryOffset + memRequirements.size - 1;
5065    ranges.push_back(range);
5066    return range;
5067}
5068
5069static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
5070    for (uint32_t item = 0; item < ranges.size(); item++) {
5071        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
5072            ranges.erase(ranges.begin() + item);
5073            break;
5074        }
5075    }
5076}
5077
5078VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5079                                         const VkAllocationCallbacks *pAllocator) {
5080    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
5084        lock.unlock();
5085        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5086        lock.lock();
5087    }
5088    // Clean up memory binding and range information for buffer
5089    auto buff_it = dev_data->bufferMap.find(buffer);
5090    if (buff_it != dev_data->bufferMap.end()) {
5091        auto mem_info = getMemObjInfo(dev_data, buff_it->second.get()->mem);
5092        if (mem_info) {
5093            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_it->second.get()->mem, mem_info->bufferRanges);
5094        }
5095        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5096        dev_data->bufferMap.erase(buff_it);
5097    }
5098}
5099
5100VKAPI_ATTR void VKAPI_CALL
5101DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5102    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5103    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5104    std::lock_guard<std::mutex> lock(global_lock);
5105    auto item = dev_data->bufferViewMap.find(bufferView);
5106    if (item != dev_data->bufferViewMap.end()) {
5107        dev_data->bufferViewMap.erase(item);
5108    }
5109}
5110
5111VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5112    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5117
5118    std::lock_guard<std::mutex> lock(global_lock);
5119    const auto &imageEntry = dev_data->imageMap.find(image);
5120    if (imageEntry != dev_data->imageMap.end()) {
5121        // Clean up memory mapping, bindings and range references for image
5122        auto mem_info = getMemObjInfo(dev_data, imageEntry->second.get()->mem);
5123        if (mem_info) {
5124            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.get()->mem, mem_info->imageRanges);
5125            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5126            mem_info->image = VK_NULL_HANDLE;
5127        }
5128        // Remove image from imageMap
5129        dev_data->imageMap.erase(imageEntry);
5130    }
5131    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5132    if (subEntry != dev_data->imageSubresourceMap.end()) {
5133        for (const auto& pair : subEntry->second) {
5134            dev_data->imageLayoutMap.erase(pair);
5135        }
5136        dev_data->imageSubresourceMap.erase(subEntry);
5137    }
5138}
5139
5140VKAPI_ATTR VkResult VKAPI_CALL
5141BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5142    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5143    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5144    std::unique_lock<std::mutex> lock(global_lock);
5145    // Track objects tied to memory
5146    uint64_t buffer_handle = (uint64_t)(buffer);
5147    bool skipCall =
5148        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5149    auto buffer_node = getBufferNode(dev_data, buffer);
5150    if (buffer_node) {
5151        buffer_node->mem = mem;
5152        VkMemoryRequirements memRequirements;
5153        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5154
5155        // Track and validate bound memory range information
5156        auto mem_info = getMemObjInfo(dev_data, mem);
5157        if (mem_info) {
5158            const MEMORY_RANGE range =
5159                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, mem_info->bufferRanges);
5160            skipCall |= validate_memory_range(dev_data, mem_info->imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5161        }
5162
5163        // Validate memory requirements alignment
5164        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5165            skipCall |=
5166                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5167                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5168                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5169                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5170                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5171                        memoryOffset, memRequirements.alignment);
5172        }
5173        // Validate device limits alignments
5174        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
5175        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5176            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5177                skipCall |=
5178                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5179                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5180                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5181                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
5182                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
5183            }
5184        }
5185        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5186            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
5187                0) {
5188                skipCall |=
5189                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5190                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
5191                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5192                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
5193                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
5194            }
5195        }
5196        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
5197            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
5198                0) {
5199                skipCall |=
5200                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5201                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
5202                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5203                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
5204                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
5205            }
5206        }
5207    }
5208    print_mem_list(dev_data);
5209    lock.unlock();
5210    if (!skipCall) {
5211        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5212    }
5213    return result;
5214}
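
// The alignment checks above are satisfied by deriving memoryOffset from the
// driver-reported requirements instead of hard-coding it. Hedged sketch, where
// cursor is a hypothetical suballocation offset (the rounding relies on
// reqs.alignment being a power of two, which the spec guarantees):
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);
//     VkDeviceSize offset = (cursor + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     vkBindBufferMemory(device, buffer, mem, offset); // alignment-safe bind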
5215
5216VKAPI_ATTR void VKAPI_CALL
5217GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5218    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5219    // TODO : What to track here?
5220    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5221    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5222}
5223
5224VKAPI_ATTR void VKAPI_CALL
5225GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5226    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5227    // TODO : What to track here?
5228    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5229    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5230}
5231
5232VKAPI_ATTR void VKAPI_CALL
5233DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5234    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5235        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5236    // TODO : Clean up any internal data structures using this obj.
5237}
5238
5239VKAPI_ATTR void VKAPI_CALL
5240DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5241    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5242
5243    std::unique_lock<std::mutex> lock(global_lock);
5244    my_data->shaderModuleMap.erase(shaderModule);
5245    lock.unlock();
5246
5247    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5248}
5249
5250VKAPI_ATTR void VKAPI_CALL
5251DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5252    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5253    // TODO : Clean up any internal data structures using this obj.
5254}
5255
5256VKAPI_ATTR void VKAPI_CALL
5257DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5258    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5259        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5260    // TODO : Clean up any internal data structures using this obj.
5261}
5262
5263VKAPI_ATTR void VKAPI_CALL
5264DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5265    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5266    // TODO : Clean up any internal data structures using this obj.
5267}
5268
5269VKAPI_ATTR void VKAPI_CALL
5270DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5271    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5272        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5273    // TODO : Clean up any internal data structures using this obj.
5274}
5275
5276VKAPI_ATTR void VKAPI_CALL
5277DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5278    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5279        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5280    // TODO : Clean up any internal data structures using this obj.
5281}
5282// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5283//  If this is a secondary command buffer, then make sure its primary is also in-flight
5284//  If primary is not in-flight, then remove secondary from global in-flight set
5285// This function is only valid at a point when cmdBuffer is being reset or freed
5286static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5287    bool skip_call = false;
5288    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5289        // Primary CB or secondary where primary is also in-flight is an error
5290        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5291            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5292            skip_call |= log_msg(
5293                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5294                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5295                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5296                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5297        } else { // Secondary CB w/o primary in-flight, remove from in-flight
5298            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5299        }
5300    }
5301    return skip_call;
5302}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) {
    bool skip_call = false;
    auto pool_data = dev_data->commandPoolMap.find(commandPool);
    if (pool_data != dev_data->commandPoolMap.end()) {
        for (auto cmd_buffer : pool_data->second.commandBuffers) {
            if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
                skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
            }
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        // Only operate on command buffers known to this layer; guard before dereferencing the iterator
        if (cb_pair != dev_data->commandBufferMap.end()) {
            skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free");
            // Delete CB information structure, and remove from commandBufferMap
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb_pair).second->commandBuffer);
            delete (*cb_pair).second;
            dev_data->commandBufferMap.erase(cb_pair);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
    printCBList(dev_data);
    lock.unlock();

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}
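// Illustrative sketch (application side, hypothetical handles): the createFlags recorded above are
// what the implicit-reset checks in vkBeginCommandBuffer() and vkResetCommandBuffer() later consult:
//     VkCommandPoolCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; // permits per-CB reset
//     ci.queueFamilyIndex = graphics_queue_family_index;
//     vkCreateCommandPool(device, &ci, nullptr, &pool);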

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
    }
    return result;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL
DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that command buffers in pool are complete (not in-flight)
    bool skipCall = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with");
    // Must remove cmdPool from commandPoolMap, after removing all cmdBuffers in its list from the commandBufferMap
    auto pool_it = dev_data->commandPoolMap.find(commandPool);
    if (pool_it != dev_data->commandPoolMap.end()) {
        for (auto cb : pool_it->second.commandBuffers) {
            clear_cmd_buf_and_mem_references(dev_data, cb);
            auto del_cb = dev_data->commandBufferMap.find(cb);
            delete del_cb->second;                    // delete CB info structure
            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    lock.unlock();

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with"))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto fence_item = dev_data->fenceMap.find(pFences[i]);
        if (fence_item != dev_data->fenceMap.end()) {
            if (fence_item->second.in_use.load()) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence 0x%" PRIx64 " is in use and cannot be reset.", reinterpret_cast<const uint64_t &>(pFences[i]));
            } else {
                // Only clear tracked state for fences that are actually resettable
                fence_item->second.needsSignaled = true;
                fence_item->second.queues.clear();
                fence_item->second.priorFences.clear();
            }
        }
    }
    lock.unlock();
    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
    return result;
}
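// Illustrative sketch (application side, hypothetical handles): the in_use check above means a
// fence should be waited on (or otherwise known to have signaled) before it is reset:
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence);                        // valid: fence no longer in use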

VKAPI_ATTR void VKAPI_CALL
DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        delete [] fbNode->second.createInfo.pAttachments;
        dev_data->frameBufferMap.erase(fbNode);
    }
    lock.unlock();
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    dev_data->renderPassMap.erase(renderPass);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(pCreateInfo))));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo));
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pCreateInfo))));
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects global_lock to be held by caller */

    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects global_lock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}
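// Worked example for the two resolvers above: for an image created with mipLevels = 10 and
// arrayLayers = 6, a range of {baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS} resolves to
// levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5.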

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    {
        // Validate that the image has correct usage flags set
        std::lock_guard<std::mutex> lock(global_lock);
        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
    }

    if (!skipCall) {
        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
    }

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
        ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image);
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
        fence_node.needsSignaled = true;
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            fence_node.firstTimeFlag = true;
            fence_node.needsSignaled = false;
        }
        fence_node.in_use.store(0);
    }
    return result;
}

// TODO : Handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// Utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_NODE *pPipe) {
    // If any attachment used by this pipeline has blendEnable and uses a constant blend factor, set blendConstantsEnabled
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
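// Illustrative consequence of the flag set above (application side, hypothetical handles): a
// pipeline whose color blend state uses any *_CONSTANT_* blend factor, e.g.
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
// gets blendConstantsEnabled set, so blend constants must be supplied either statically in
// VkPipelineColorBlendStateCreateInfo::blendConstants or dynamically via
// vkCmdSetBlendConstants() before drawing.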

VKAPI_ATTR VkResult VKAPI_CALL
CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                        VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skipCall = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pPipeNode[i]->renderPass = getRenderPass(dev_data, pCreateInfos[i].renderPass);
        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);

        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                       VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    bool skipCall = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        skipCall |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i],
                                               &dev_data->phys_dev_properties.features,
                                               dev_data->shaderModuleMap);
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] =
            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skipCall = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
                                                              "exceeds this device's maxPushConstantsSize of %u.",
                        caller_name, index, offset, size, maxPushConstantsSize);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                      "exceeds this device's maxPushConstantsSize of %u.",
                                caller_name, offset, size, maxPushConstantsSize);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, index, size);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, size);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                                      "offset %u. Offset must be a multiple of 4.",
                                caller_name, index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                                      "offset %u. Offset must be a multiple of 4.",
                                caller_name, offset);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skipCall;
}
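// Worked example for the three checks above: with maxPushConstantsSize = 128, a range
// {offset = 120, size = 16} fails the first check (16 > 128 - 120), {offset = 0, size = 6}
// fails the multiple-of-4 size check, and {offset = 2, size = 8} fails the offset alignment
// check; {offset = 4, size = 8} passes all three.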

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Push Constant Range checks
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                "vkCreatePipelineLayout() call has no stageFlags set for push constant range index %u.", i);
        }
    }
    // Each range has been validated.  Now check for overlap between ranges (if they are good).
    if (!skipCall) {
        uint32_t i, j;
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
                                i, minA, maxA, j, minB, maxB);
                }
            }
        }
    }
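    // Worked example for the overlap test above: ranges 0:[0, 16) and 1:[8, 24) overlap
    // because minA (0) <= minB (8) and maxA (16) > minB (8); ranges 0:[0, 16) and
    // 1:[16, 32) do not, since the intervals are half-open.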

    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
            plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                     VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Log creation of the pool, then add its tracking node to the global descriptorPoolMap
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO : Anything to clean up if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    bool skip_call = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i)
        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");

    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    return skip_call;
}
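// Illustrative sketch (application side, hypothetical handles): freeing individual sets is only
// legal if the pool opted in at creation time; otherwise the whole pool must be reset instead:
//     VkDescriptorPoolCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // enables vkFreeDescriptorSets
//     // (maxSets and pool sizes filled in as usual)
//     vkFreeDescriptorSets(device, pool, 1, &set);   // valid only with the flag above
//     vkResetDescriptorPool(device, pool, 0);        // always-valid alternative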
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        auto set_state = dev_data->setMap[descriptor_sets[i]];
        uint32_t type_index = 0, descriptor_count = 0;
        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
        }
        freeDescriptorSet(dev_data, set_state);
        pool_state->sets.erase(set_state);
    }
}
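// Accounting example for the loop above: freeing one set whose layout has a binding of 4
// UNIFORM_BUFFER descriptors and a binding of 2 COMBINED_IMAGE_SAMPLER descriptors returns
// 4 and 2 to the respective availableDescriptorTypeCount entries, and availableSets grows by 1.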

VKAPI_ATTR VkResult VKAPI_CALL
FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skipCall = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a Proof-of-concept for core validation architecture
//  Really we'll want to break out these functions to separate files but
//  keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    //  so we can't do a single map look-up up-front; the per-set look-ups are done in the helper below

    // Now make call(s) that validate state, but don't perform state updates in this function
    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
    //  namespace which will parse params and make calls into specific class instances
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL
UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                         pDescriptorCopies);
    lock.unlock();
    if (!skip_call) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        printCBList(dev_data);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
                        "You must check CB fence before this call.",
                        commandBuffer);
        }
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must specify a valid renderpass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) may perform better if a "
                                                  "valid framebuffer parameter is specified.",
                                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
                        if (framebuffer) {
                            VkRenderPass fbRP = framebuffer->createInfo.renderPass;
                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
                                skipCall |=
                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
                            }
                            // Connect this framebuffer to this cmdBuffer
                            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
                                        "support precise occlusion queries.",
                                        reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
                                            "that is less than the number of subpasses (%d).",
                                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == pCB->state) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == pCB->state ||
                   (CB_INVALID == pCB->state && !pCB->cmds.empty() && CMD_END == pCB->cmds.back().type)) {
            VkCommandPool cmdPool = pCB->createInfo.commandPool;
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        pCB->state = CB_RECORDING;
        pCB->beginInfo = *pBeginInfo;
        if (pCB->beginInfo.pInheritanceInfo) {
            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
            // If we are a secondary command buffer that is inheriting, update the items we should inherit
            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    } else {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkBeginCommandBuffer(): Unable to find CommandBuffer node for CB 0x%p!", (void *)commandBuffer);
    }
    lock.unlock();
    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}
6239
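// Example (illustrative sketch, not layer code): beginning a secondary command buffer
// that continues a render pass, satisfying the inheritance checks above. The handle
// names (renderPass, framebuffer, secondaryCB) are hypothetical.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;   // assumed valid render pass handle
//     inherit.subpass = 0;               // must be < subpassCount of renderPass
//     inherit.framebuffer = framebuffer; // may be VK_NULL_HANDLE if not yet known
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCB, &begin);
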
6240VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6241    bool skipCall = false;
6242    VkResult result = VK_SUCCESS;
6243    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6244    std::unique_lock<std::mutex> lock(global_lock);
6245    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6246    if (pCB) {
6247        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6248            // This needs spec clarification to update valid usage, see comments in PR:
6249            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6250            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6251        }
6252        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6253        for (auto query : pCB->activeQueries) {
6254            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6255                                DRAWSTATE_INVALID_QUERY, "DS",
6256                                "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
6257                                (uint64_t)(query.pool), query.index);
6258        }
6259    }
6260    if (!skipCall) {
6261        lock.unlock();
6262        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6263        lock.lock();
6264        if (VK_SUCCESS == result) {
6265            pCB->state = CB_RECORDED;
6266            // Reset CB status flags
6267            pCB->status = 0;
6268            printCB(dev_data, commandBuffer);
6269        }
6270    } else {
6271        result = VK_ERROR_VALIDATION_FAILED_EXT;
6272    }
6273    lock.unlock();
6274    return result;
6275}
6276
6277VKAPI_ATTR VkResult VKAPI_CALL
6278ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6279    bool skip_call = false;
6280    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6281    std::unique_lock<std::mutex> lock(global_lock);
6282    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6283    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6284    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6285        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6286                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6287                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6288                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6289                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6290    }
6291    skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset");
6292    lock.unlock();
6293    if (skip_call)
6294        return VK_ERROR_VALIDATION_FAILED_EXT;
6295    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6296    if (VK_SUCCESS == result) {
6297        lock.lock();
6298        resetCB(dev_data, commandBuffer);
6299        lock.unlock();
6300    }
6301    return result;
6302}
6303
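// Example (illustrative sketch): both vkResetCommandBuffer() and the implicit reset
// checked in vkBeginCommandBuffer() above require the pool to have been created with
// the RESET bit. Handle names (device, pool, cb, queue_family_index) are hypothetical.
//
//     VkCommandPoolCreateInfo pool_info = {};
//     pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_info.queueFamilyIndex = queue_family_index;
//     vkCreateCommandPool(device, &pool_info, nullptr, &pool);
//     // ... allocate and record cb from pool ...
//     vkResetCommandBuffer(cb, 0); // valid: pool allows per-buffer resets
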
6304VKAPI_ATTR void VKAPI_CALL
6305CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6306    bool skipCall = false;
6307    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6308    std::unique_lock<std::mutex> lock(global_lock);
6309    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6310    if (pCB) {
6311        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6312        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6313            skipCall |=
6314                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6315                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6316                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6317                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6318        }
6319
6320        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6321        if (pPN) {
6322            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6323            set_cb_pso_status(pCB, pPN);
6324            set_pipeline_state(pPN);
6325        } else {
6326            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6327                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6328                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6329        }
6330    }
6331    lock.unlock();
6332    if (!skipCall)
6333        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6334}
6335
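// Example (illustrative sketch): the DRAWSTATE_INVALID_RENDERPASS_CMD check above
// flags compute pipeline binds inside an active render pass; a conforming recording
// binds compute pipelines outside it. Handles (cb, computePipe, gfxPipe, rp_begin)
// are hypothetical.
//
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, computePipe); // ok: no render pass active
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, gfxPipe);    // graphics binds only in here
//     vkCmdEndRenderPass(cb);
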
6336VKAPI_ATTR void VKAPI_CALL
6337CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6338    bool skipCall = false;
6339    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6340    std::unique_lock<std::mutex> lock(global_lock);
6341    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6342    if (pCB) {
6343        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6344        pCB->status |= CBSTATUS_VIEWPORT_SET;
6345        pCB->viewports.resize(viewportCount);
6346        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6347    }
6348    lock.unlock();
6349    if (!skipCall)
6350        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6351}
6352
6353VKAPI_ATTR void VKAPI_CALL
6354CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6355    bool skipCall = false;
6356    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6357    std::unique_lock<std::mutex> lock(global_lock);
6358    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6359    if (pCB) {
6360        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6361        pCB->status |= CBSTATUS_SCISSOR_SET;
6362        pCB->scissors.resize(scissorCount);
6363        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6364    }
6365    lock.unlock();
6366    if (!skipCall)
6367        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6368}
6369
6370VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6371    bool skip_call = false;
6372    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6373    std::unique_lock<std::mutex> lock(global_lock);
6374    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6375    if (pCB) {
6376        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6377        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6378
6379        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6380        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6381            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6382                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6383                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
6384                                 "flag. This is undefined behavior and the call may be ignored.");
6385        } else {
6386            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6387        }
6388    }
6389    lock.unlock();
6390    if (!skip_call)
6391        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6392}
6393
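// Example (illustrative sketch): the warning above fires unless the bound graphics
// pipeline declared line width as dynamic state at creation time. Names are
// hypothetical.
//
//     VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_LINE_WIDTH};
//     VkPipelineDynamicStateCreateInfo dyn = {};
//     dyn.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dyn.dynamicStateCount = 1;
//     dyn.pDynamicStates = dyn_states;
//     // ... pass &dyn as VkGraphicsPipelineCreateInfo::pDynamicState ...
//     vkCmdSetLineWidth(cb, 1.0f); // now valid; width still subject to device limits
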
6394VKAPI_ATTR void VKAPI_CALL
6395CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6396    bool skipCall = false;
6397    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6398    std::unique_lock<std::mutex> lock(global_lock);
6399    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6400    if (pCB) {
6401        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6402        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6403    }
6404    lock.unlock();
6405    if (!skipCall)
6406        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6407                                                         depthBiasSlopeFactor);
6408}
6409
6410VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6411    bool skipCall = false;
6412    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6413    std::unique_lock<std::mutex> lock(global_lock);
6414    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6415    if (pCB) {
6416        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6417        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6418    }
6419    lock.unlock();
6420    if (!skipCall)
6421        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6422}
6423
6424VKAPI_ATTR void VKAPI_CALL
6425CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6426    bool skipCall = false;
6427    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6428    std::unique_lock<std::mutex> lock(global_lock);
6429    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6430    if (pCB) {
6431        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6432        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6433    }
6434    lock.unlock();
6435    if (!skipCall)
6436        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6437}
6438
6439VKAPI_ATTR void VKAPI_CALL
6440CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6441    bool skipCall = false;
6442    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6443    std::unique_lock<std::mutex> lock(global_lock);
6444    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6445    if (pCB) {
6446        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6447        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6448    }
6449    lock.unlock();
6450    if (!skipCall)
6451        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6452}
6453
6454VKAPI_ATTR void VKAPI_CALL
6455CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6456    bool skipCall = false;
6457    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6458    std::unique_lock<std::mutex> lock(global_lock);
6459    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6460    if (pCB) {
6461        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6462        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6463    }
6464    lock.unlock();
6465    if (!skipCall)
6466        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6467}
6468
6469VKAPI_ATTR void VKAPI_CALL
6470CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6471    bool skipCall = false;
6472    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6473    std::unique_lock<std::mutex> lock(global_lock);
6474    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6475    if (pCB) {
6476        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6477        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6478    }
6479    lock.unlock();
6480    if (!skipCall)
6481        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6482}
6483
6484VKAPI_ATTR void VKAPI_CALL
6485CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6486                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6487                      const uint32_t *pDynamicOffsets) {
6488    bool skipCall = false;
6489    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6490    std::unique_lock<std::mutex> lock(global_lock);
6491    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6492    if (pCB) {
6493        if (pCB->state == CB_RECORDING) {
6494            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6495            uint32_t totalDynamicDescriptors = 0;
6496            string errorString = "";
6497            uint32_t lastSetIndex = firstSet + setCount - 1;
6498            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6499                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6500                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6501            }
6502            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6503            for (uint32_t i = 0; i < setCount; i++) {
6504                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6505                if (pSet) {
6506                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6507                    pSet->BindCommandBuffer(pCB);
6508                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6509                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6510                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6511                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6512                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound to pipeline bind point %s",
6513                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6514                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6515                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6516                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6517                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6518                                            "DS 0x%" PRIxLEAST64
6519                                            " bound but it was never updated. You may want to either update it or not bind it.",
6520                                            (uint64_t)pDescriptorSets[i]);
6521                    }
6522                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6523                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6524                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6525                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6526                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6527                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6528                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6529                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6530                    }
6531
6532                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6533
6534                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6535
6536                    if (setDynamicDescriptorCount) {
6537                        // First make sure we won't overstep bounds of pDynamicOffsets array
6538                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6539                            skipCall |=
6540                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6541                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6542                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6543                                        "descriptorSet #%u (0x%" PRIxLEAST64
6544                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6545                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6546                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6547                                        (dynamicOffsetCount - totalDynamicDescriptors));
6548                        } else { // Validate and store dynamic offsets with the set
6549                            // Validate Dynamic Offset Minimums
6550                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6551                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6552                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6553                                    if (vk_safe_modulo(
6554                                            pDynamicOffsets[cur_dyn_offset],
6555                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6556                                        skipCall |= log_msg(
6557                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6558                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6559                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6560                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6561                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6562                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6563                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6564                                    }
6565                                    cur_dyn_offset++;
6566                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6567                                    if (vk_safe_modulo(
6568                                            pDynamicOffsets[cur_dyn_offset],
6569                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6570                                        skipCall |= log_msg(
6571                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6572                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6573                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6574                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6575                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6576                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6577                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6578                                    }
6579                                    cur_dyn_offset++;
6580                                }
6581                            }
6582
6583                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6584                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6585                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6586                            // Keep running total of dynamic descriptor count to verify at the end
6587                            totalDynamicDescriptors += setDynamicDescriptorCount;
6588
6589                        }
6590                    }
6591                } else {
6592                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6593                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6594                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6595                                        (uint64_t)pDescriptorSets[i]);
6596                }
6597                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6598                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6599                if (firstSet > 0) { // Check set #s below the first bound set
6600                    for (uint32_t i = 0; i < firstSet; ++i) {
6601                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6602                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6603                                                             layout, i, errorString)) {
6604                            skipCall |= log_msg(
6605                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6606                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6607                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6608                                "DescriptorSet 0x%" PRIxLEAST64
6609                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6610                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6611                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6612                        }
6613                    }
6614                }
6615                // Check if newly last bound set invalidates any remaining bound sets
6616                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6617                    if (oldFinalBoundSet &&
6618                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6619                        auto old_set = oldFinalBoundSet->GetSet();
6620                        skipCall |=
6621                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6622                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6623                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
6624                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6625                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6626                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6627                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6628                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6629                                    lastSetIndex + 1, (uint64_t)layout);
6630                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6631                    }
6632                }
6633            }
6634            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6635            if (totalDynamicDescriptors != dynamicOffsetCount) {
6636                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6637                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6638                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6639                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6640                                    "is %u. It should exactly match the number of dynamic descriptors.",
6641                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6642            }
6643        } else {
6644            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6645        }
6646    }
6647    lock.unlock();
6648    if (!skipCall)
6649        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6650                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6651}
6652
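// Example (illustrative sketch): for the dynamic-offset checks above, if the sets
// being bound contain two dynamic descriptors in total, dynamicOffsetCount must be
// exactly 2 and each offset a multiple of the matching min*BufferOffsetAlignment
// limit. Handles (cb, layout, set) are hypothetical.
//
//     uint32_t offsets[2] = {0, 256}; // e.g. multiples of minUniformBufferOffsetAlignment
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
//                             0 /*firstSet*/, 1 /*setCount*/, &set,
//                             2 /*dynamicOffsetCount*/, offsets);
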
6653VKAPI_ATTR void VKAPI_CALL
6654CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6655    bool skipCall = false;
6656    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6657    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6658    std::unique_lock<std::mutex> lock(global_lock);
6659    VkDeviceMemory mem;
6660    skipCall =
6661        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6662    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6663    if (cb_data != dev_data->commandBufferMap.end()) {
6664        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6665        cb_data->second->validate_functions.push_back(function);
6666        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6667        VkDeviceSize offset_align = 0;
6668        switch (indexType) {
6669        case VK_INDEX_TYPE_UINT16:
6670            offset_align = 2;
6671            break;
6672        case VK_INDEX_TYPE_UINT32:
6673            offset_align = 4;
6674            break;
6675        default:
6676            // ParamChecker should catch a bad enum; if offset_align stays 0 we also flag the alignment error below
6677            break;
6678        }
6679        if (!offset_align || (offset % offset_align)) {
6680            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6681                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6682                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
6683                                offset, string_VkIndexType(indexType));
6684        }
6685        cb_data->second->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6686    }
6687    lock.unlock();
6688    if (!skipCall)
6689        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6690}
6691
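// Example (illustrative sketch): the alignment rule enforced above in terms of the
// index type. Handles (cb, indexBuffer) are hypothetical.
//
//     vkCmdBindIndexBuffer(cb, indexBuffer, 0, VK_INDEX_TYPE_UINT16); // ok: 0 % 2 == 0
//     vkCmdBindIndexBuffer(cb, indexBuffer, 6, VK_INDEX_TYPE_UINT32); // flagged: 6 % 4 != 0
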
6692void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6693    uint32_t end = firstBinding + bindingCount;
6694    if (pCB->currentDrawData.buffers.size() < end) {
6695        pCB->currentDrawData.buffers.resize(end);
6696    }
6697    for (uint32_t i = 0; i < bindingCount; ++i) {
6698        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6699    }
6700}
6701
6702static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6703
6704VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6705                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6706                                                const VkDeviceSize *pOffsets) {
6707    bool skipCall = false;
6708    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6709    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6710    std::unique_lock<std::mutex> lock(global_lock);
6711    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6712    if (cb_data != dev_data->commandBufferMap.end()) {
6713        for (uint32_t i = 0; i < bindingCount; ++i) {
6714            VkDeviceMemory mem;
6715            skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6716
6717            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6718            cb_data->second->validate_functions.push_back(function);
6719        }
6720        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
6721        updateResourceTracking(cb_data->second, firstBinding, bindingCount, pBuffers);
6722    } else {
6723        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
6724    }
6725    lock.unlock();
6726    if (!skipCall)
6727        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6728}
6729
6730/* expects global_lock to be held by caller */
6731static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6732    bool skip_call = false;
6733
6734    for (auto imageView : pCB->updateImages) {
6735        auto iv_data = getImageViewData(dev_data, imageView);
6736        if (!iv_data)
6737            continue;
6738        VkImage image = iv_data->image;
6739        VkDeviceMemory mem;
6740        skip_call |=
6741            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
6742        std::function<bool()> function = [=]() {
6743            set_memory_valid(dev_data, mem, true, image);
6744            return false;
6745        };
6746        pCB->validate_functions.push_back(function);
6747    }
6748    for (auto buffer : pCB->updateBuffers) {
6749        VkDeviceMemory mem;
6750        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
6751                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6752        std::function<bool()> function = [=]() {
6753            set_memory_valid(dev_data, mem, true);
6754            return false;
6755        };
6756        pCB->validate_functions.push_back(function);
6757    }
6758    return skip_call;
6759}
6760
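// Note: the std::function objects queued onto validate_functions above (and in the
// vkCmdBind*/vkCmdCopy* entry points) follow this layer's deferred-validation
// pattern: nothing is checked at record time; the lambdas run later, when the
// command buffer is validated (typically at queue submission), so they observe the
// memory's validity as of execution rather than recording.
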
6761VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6762                                   uint32_t firstVertex, uint32_t firstInstance) {
6763    bool skipCall = false;
6764    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6765    std::unique_lock<std::mutex> lock(global_lock);
6766    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6767    if (pCB) {
6768        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6769        pCB->drawCount[DRAW]++;
6770        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6771        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6772        // TODO : Need to pass commandBuffer as srcObj here
6773        skipCall |=
6774            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6775                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6776        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6777        if (!skipCall) {
6778            updateResourceTrackingOnDraw(pCB);
6779        }
6780        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6781    }
6782    lock.unlock();
6783    if (!skipCall)
6784        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6785}
6786
6787VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6788                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6789                                          uint32_t firstInstance) {
6790    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6791    bool skipCall = false;
6792    std::unique_lock<std::mutex> lock(global_lock);
6793    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6794    if (pCB) {
6795        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6796        pCB->drawCount[DRAW_INDEXED]++;
6797        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6798        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6799        // TODO : Need to pass commandBuffer as srcObj here
6800        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6801                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6802                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6803        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6804        if (!skipCall) {
6805            updateResourceTrackingOnDraw(pCB);
6806        }
6807        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6808    }
6809    lock.unlock();
6810    if (!skipCall)
6811        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6812                                                        firstInstance);
6813}
6814
6815VKAPI_ATTR void VKAPI_CALL
6816CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6817    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6818    bool skipCall = false;
6819    std::unique_lock<std::mutex> lock(global_lock);
6820    VkDeviceMemory mem;
6821    // MTMTODO : merge with code below
6822    skipCall =
6823        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6824    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6825    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6826    if (pCB) {
6827        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6828        pCB->drawCount[DRAW_INDIRECT]++;
6829        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6830        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6831        // TODO : Need to pass commandBuffer as srcObj here
6832        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6833                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6834                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6835        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6836        if (!skipCall) {
6837            updateResourceTrackingOnDraw(pCB);
6838        }
6839        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6840    }
6841    lock.unlock();
6842    if (!skipCall)
6843        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6844}
6845
6846VKAPI_ATTR void VKAPI_CALL
6847CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6848    bool skipCall = false;
6849    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6850    std::unique_lock<std::mutex> lock(global_lock);
6851    VkDeviceMemory mem;
6852    // MTMTODO : merge with code below
6853    skipCall =
6854        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6855    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6856    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6857    if (pCB) {
6858        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6859        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6860        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6861        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6862        // TODO : Need to pass commandBuffer as srcObj here
6863        skipCall |=
6864            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6865                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6866                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6867        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6868        if (!skipCall) {
6869            updateResourceTrackingOnDraw(pCB);
6870        }
6871        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6872    }
6873    lock.unlock();
6874    if (!skipCall)
6875        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6876}
6877
6878VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6879    bool skipCall = false;
6880    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6881    std::unique_lock<std::mutex> lock(global_lock);
6882    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6883    if (pCB) {
6884        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6885        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6886        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6887        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6888    }
6889    lock.unlock();
6890    if (!skipCall)
6891        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
6892}
6893
6894VKAPI_ATTR void VKAPI_CALL
6895CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6896    bool skipCall = false;
6897    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6898    std::unique_lock<std::mutex> lock(global_lock);
6899    VkDeviceMemory mem;
6900    skipCall =
6901        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6902    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
6903    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6904    if (pCB) {
6905        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6906        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6907        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
6908        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
6909    }
6910    lock.unlock();
6911    if (!skipCall)
6912        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
6913}
6914
6915VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6916                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
6917    bool skipCall = false;
6918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6919    std::unique_lock<std::mutex> lock(global_lock);
6920    VkDeviceMemory src_mem, dst_mem;
6921    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
6922    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
6923    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
6924
6925    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
6926    // Validate that SRC & DST buffers have correct usage flags set
6927    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
6928                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
6929    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
6930                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6931    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6932    if (cb_data != dev_data->commandBufferMap.end()) {
6933        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
6934        cb_data->second->validate_functions.push_back(function);
6935        function = [=]() {
6936            set_memory_valid(dev_data, dst_mem, true);
6937            return false;
6938        };
6939        cb_data->second->validate_functions.push_back(function);
6940
6941        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
6942        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBuffer");
6943    }
6944    lock.unlock();
6945    if (!skipCall)
6946        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
6947}
6948
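// Example (illustrative sketch): the usage-flag checks above require copy sources
// and destinations to have been created with the matching TRANSFER usage bits.
// Handles (device, buffer) are hypothetical.
//
//     VkBufferCreateInfo buf_info = {};
//     buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_info.size = 4096;
//     buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//     buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     vkCreateBuffer(device, &buf_info, nullptr, &buffer);
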
6949static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
6950                                    VkImageLayout srcImageLayout) {
6951    bool skip_call = false;
6952
6953    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
6954    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
6955    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
6956        uint32_t layer = i + subLayers.baseArrayLayer;
6957        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
6958        IMAGE_CMD_BUF_LAYOUT_NODE node;
6959        if (!FindLayout(pCB, srcImage, sub, node)) {
6960            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
6961            continue;
6962        }
6963        if (node.layout != srcImageLayout) {
6965            skip_call |=
6966                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6967                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image with a specified source layout of %s "
6968                                                                        "that does not match its actual current layout of %s.",
6969                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
6970        }
6971    }
6972    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
6973        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
6974            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
6975            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6976                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
6977                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
6978        } else {
6979            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6980                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
6981                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
6982                                 string_VkImageLayout(srcImageLayout));
6983        }
6984    }
6985    return skip_call;
6986}
6987
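// Example (illustrative sketch): transitioning an image so the check above sees
// TRANSFER_SRC_OPTIMAL rather than the GENERAL fallback. Handles (cb, srcImage) are
// hypothetical.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = srcImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &barrier);
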
6988static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
6989                                  VkImageLayout destImageLayout) {
6990    bool skip_call = false;
6991
6992    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
6993    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
6994    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
6995        uint32_t layer = i + subLayers.baseArrayLayer;
6996        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
6997        IMAGE_CMD_BUF_LAYOUT_NODE node;
6998        if (!FindLayout(pCB, destImage, sub, node)) {
6999            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7000            continue;
7001        }
7002        if (node.layout != destImageLayout) {
7003            skip_call |=
7004                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7005                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image with a specified destination layout of %s "
7006                                                                        "that does not match its actual current layout of %s.",
7007                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7008        }
7009    }
7010    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7011        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7012            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7013            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7014                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7015                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7016        } else {
7017            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7018                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7019                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7020                                 string_VkImageLayout(destImageLayout));
7021        }
7022    }
7023    return skip_call;
7024}
7025
7026VKAPI_ATTR void VKAPI_CALL
7027CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7028             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7029    bool skipCall = false;
7030    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7031    std::unique_lock<std::mutex> lock(global_lock);
7032    VkDeviceMemory src_mem, dst_mem;
7033    // Validate that src & dst images have correct usage flags set
7034    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7035    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7036
7037    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7038    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7039    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7040                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7041    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7042                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7043    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7044    if (cb_data != dev_data->commandBufferMap.end()) {
7045        std::function<bool()> function = [=]() {
7046            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7047        };
7048        cb_data->second->validate_functions.push_back(function);
7049        function = [=]() {
7050            set_memory_valid(dev_data, dst_mem, true, dstImage);
7051            return false;
7052        };
7053        cb_data->second->validate_functions.push_back(function);
7054
7055        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGE, "vkCmdCopyImage()");
7056        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImage");
7057        for (uint32_t i = 0; i < regionCount; ++i) {
7058            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7059            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7060        }
7061    }
7062    lock.unlock();
7063    if (!skipCall)
7064        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7065                                                      regionCount, pRegions);
7066}
7067
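// Example (illustrative sketch): a single full-extent color copy region for the
// call above, with both images already in their optimal transfer layouts. Names
// (cb, srcImage, dstImage, width, height) are hypothetical.
//
//     VkImageCopy region = {};
//     region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1}; // aspect, mip, baseLayer, layerCount
//     region.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     region.extent = {width, height, 1};
//     vkCmdCopyImage(cb, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                    dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
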
7068VKAPI_ATTR void VKAPI_CALL
7069CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7070             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7071    bool skipCall = false;
7072    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7073    std::unique_lock<std::mutex> lock(global_lock);
7074    VkDeviceMemory src_mem, dst_mem;
7075    // Validate that src & dst images have correct usage flags set
7076    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7077    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7078
7079    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7080    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7081    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7082                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7083    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7084                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7085
7086    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7087    if (cb_data != dev_data->commandBufferMap.end()) {
7088        std::function<bool()> function = [=]() {
7089            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7090        };
7091        cb_data->second->validate_functions.push_back(function);
7092        function = [=]() {
7093            set_memory_valid(dev_data, dst_mem, true, dstImage);
7094            return false;
7095        };
7096        cb_data->second->validate_functions.push_back(function);
7097
7098        skipCall |= addCmd(dev_data, cb_data->second, CMD_BLITIMAGE, "vkCmdBlitImage()");
7099        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdBlitImage");
7100    }
7101    lock.unlock();
7102    if (!skipCall)
7103        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7104                                                      regionCount, pRegions, filter);
7105}
7106
7107VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7108                                                VkImage dstImage, VkImageLayout dstImageLayout,
7109                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7110    bool skipCall = false;
7111    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7112    std::unique_lock<std::mutex> lock(global_lock);
7113    VkDeviceMemory dst_mem, src_mem;
7114    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7115    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7116
7117    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
7118    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7119    // Validate that src buff & dst image have correct usage flags set
7120    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7121                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7122    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7123                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7124    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7125    if (cb_data != dev_data->commandBufferMap.end()) {
7126        std::function<bool()> function = [=]() {
7127            set_memory_valid(dev_data, dst_mem, true, dstImage);
7128            return false;
7129        };
7130        cb_data->second->validate_functions.push_back(function);
7131        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7132        cb_data->second->validate_functions.push_back(function);
7133
7134        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7135        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBufferToImage");
7136        for (uint32_t i = 0; i < regionCount; ++i) {
7137            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7138        }
7139    }
7140    lock.unlock();
7141    if (!skipCall)
7142        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7143                                                              pRegions);
7144}
7145
7146VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7147                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7148                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7149    bool skipCall = false;
7150    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7151    std::unique_lock<std::mutex> lock(global_lock);
7152    VkDeviceMemory src_mem, dst_mem;
7153    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7154    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7155
7156    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
7157    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7158    // Validate that dst buff & src image have correct usage flags set
7159    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7160                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7161    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7162                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7163
7164    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7165    if (cb_data != dev_data->commandBufferMap.end()) {
7166        std::function<bool()> function = [=]() {
7167            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7168        };
7169        cb_data->second->validate_functions.push_back(function);
7170        function = [=]() {
7171            set_memory_valid(dev_data, dst_mem, true);
7172            return false;
7173        };
7174        cb_data->second->validate_functions.push_back(function);
7175
7176        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7177        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImageToBuffer");
7178        for (uint32_t i = 0; i < regionCount; ++i) {
7179            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7180        }
7181    }
7182    lock.unlock();
7183    if (!skipCall)
7184        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7185                                                              pRegions);
7186}
7187
7188VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7189                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7190    bool skipCall = false;
7191    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7192    std::unique_lock<std::mutex> lock(global_lock);
7193    VkDeviceMemory mem;
7194    skipCall =
7195        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7196    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7197    // Validate that dst buff has correct usage flags set
7198    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7199                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7200
7201    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7202    if (cb_data != dev_data->commandBufferMap.end()) {
7203        std::function<bool()> function = [=]() {
7204            set_memory_valid(dev_data, mem, true);
7205            return false;
7206        };
7207        cb_data->second->validate_functions.push_back(function);
7208
7209        skipCall |= addCmd(dev_data, cb_data->second, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7210        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdUpdateBuffer");
7211    }
7212    lock.unlock();
7213    if (!skipCall)
7214        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7215}
7216
7217VKAPI_ATTR void VKAPI_CALL
7218CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7219    bool skipCall = false;
7220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7221    std::unique_lock<std::mutex> lock(global_lock);
7222    VkDeviceMemory mem;
7223    skipCall =
7224        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7225    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7226    // Validate that dst buff has correct usage flags set
7227    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7228                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7229
7230    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7231    if (cb_data != dev_data->commandBufferMap.end()) {
7232        std::function<bool()> function = [=]() {
7233            set_memory_valid(dev_data, mem, true);
7234            return false;
7235        };
7236        cb_data->second->validate_functions.push_back(function);
7237
7238        skipCall |= addCmd(dev_data, cb_data->second, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7239        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdFillBuffer");
7240    }
7241    lock.unlock();
7242    if (!skipCall)
7243        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7244}
7245
7246VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7247                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7248                                               const VkClearRect *pRects) {
7249    bool skipCall = false;
7250    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7251    std::unique_lock<std::mutex> lock(global_lock);
7252    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7253    if (pCB) {
7254        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7255        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7256        if (!hasDrawCmd(pCB) && rectCount && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7257            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7258            // TODO : commandBuffer should be srcObj
7259            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
7260            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
7261            // call CmdClearAttachments
7262            // Otherwise this seems more like a performance warning.
7263            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7264                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7265                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7266                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7267                                (uint64_t)(commandBuffer));
7268        }
7269        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7270    }
7271
7272    // Validate that attachment is in reference list of active subpass
7273    if (pCB && pCB->activeRenderPass) {
7274        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7275        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7276
7277        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7278            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7279            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7280                bool found = false;
7281                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7282                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7283                        found = true;
7284                        break;
7285                    }
7286                }
7287                if (!found) {
7288                    skipCall |= log_msg(
7289                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7290                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7291                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7292                        attachment->colorAttachment, pCB->activeSubpass);
7293                }
7294            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7295                if (!pSD->pDepthStencilAttachment || // No DS attachment referenced by the active subpass
7296                    (pSD->pDepthStencilAttachment->attachment ==
7297                     VK_ATTACHMENT_UNUSED)) { // DS attachment reference is marked unused
7298
7299                    skipCall |= log_msg(
7300                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7301                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7302                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7303                        "in active subpass %d",
7304                        attachment->colorAttachment,
7305                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7306                        pCB->activeSubpass);
7307                }
7308            }
7309        }
7310    }
7311    lock.unlock();
7312    if (!skipCall)
7313        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7314}
7315
7316VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7317                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7318                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7319    bool skipCall = false;
7320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7321    std::unique_lock<std::mutex> lock(global_lock);
7322    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7323    VkDeviceMemory mem;
7324    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7325    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7326    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7327    if (cb_data != dev_data->commandBufferMap.end()) {
7328        std::function<bool()> function = [=]() {
7329            set_memory_valid(dev_data, mem, true, image);
7330            return false;
7331        };
7332        cb_data->second->validate_functions.push_back(function);
7333
7334        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7335        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearColorImage");
7336    }
7337    lock.unlock();
7338    if (!skipCall)
7339        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7340}
7341
7342VKAPI_ATTR void VKAPI_CALL
7343CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7344                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7345                          const VkImageSubresourceRange *pRanges) {
7346    bool skipCall = false;
7347    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7348    std::unique_lock<std::mutex> lock(global_lock);
7349    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7350    VkDeviceMemory mem;
7351    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7352    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7353    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7354    if (cb_data != dev_data->commandBufferMap.end()) {
7355        std::function<bool()> function = [=]() {
7356            set_memory_valid(dev_data, mem, true, image);
7357            return false;
7358        };
7359        cb_data->second->validate_functions.push_back(function);
7360
7361        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7362        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearDepthStencilImage");
7363    }
7364    lock.unlock();
7365    if (!skipCall)
7366        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7367                                                                   pRanges);
7368}
7369
7370VKAPI_ATTR void VKAPI_CALL
7371CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7372                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7373    bool skipCall = false;
7374    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7375    std::unique_lock<std::mutex> lock(global_lock);
7376    VkDeviceMemory src_mem, dst_mem;
7377    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7378    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7379
7380    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7381    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7382    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7383    if (cb_data != dev_data->commandBufferMap.end()) {
7384        std::function<bool()> function = [=]() {
7385            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7386        };
7387        cb_data->second->validate_functions.push_back(function);
7388        function = [=]() {
7389            set_memory_valid(dev_data, dst_mem, true, dstImage);
7390            return false;
7391        };
7392        cb_data->second->validate_functions.push_back(function);
7393
7394        skipCall |= addCmd(dev_data, cb_data->second, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7395        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdResolveImage");
7396    }
7397    lock.unlock();
7398    if (!skipCall)
7399        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7400                                                         regionCount, pRegions);
7401}
7402
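// Deferred event-state update: bound into pCB->eventUpdates by vkCmdSetEvent and
// vkCmdResetEvent below, with the queue left as the open parameter, and executed at
// queue submit time so both the command buffer's and the queue's eventToStageMap
// record the stageMask the event was last signaled (or reset) with.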
7403bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7404    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7405    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7406    if (pCB) {
7407        pCB->eventToStageMap[event] = stageMask;
7408    }
7409    auto queue_data = dev_data->queueMap.find(queue);
7410    if (queue_data != dev_data->queueMap.end()) {
7411        queue_data->second.eventToStageMap[event] = stageMask;
7412    }
7413    return false;
7414}
7415
7416VKAPI_ATTR void VKAPI_CALL
7417CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7418    bool skipCall = false;
7419    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7420    std::unique_lock<std::mutex> lock(global_lock);
7421    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7422    if (pCB) {
7423        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7424        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7425        pCB->events.push_back(event);
7426        if (!pCB->waitedEvents.count(event)) {
7427            pCB->writeEventsBeforeWait.push_back(event);
7428        }
7429        std::function<bool(VkQueue)> eventUpdate =
7430            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7431        pCB->eventUpdates.push_back(eventUpdate);
7432    }
7433    lock.unlock();
7434    if (!skipCall)
7435        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7436}
7437
7438VKAPI_ATTR void VKAPI_CALL
7439CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7440    bool skipCall = false;
7441    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7442    std::unique_lock<std::mutex> lock(global_lock);
7443    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7444    if (pCB) {
7445        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7446        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7447        pCB->events.push_back(event);
7448        if (!pCB->waitedEvents.count(event)) {
7449            pCB->writeEventsBeforeWait.push_back(event);
7450        }
7451        std::function<bool(VkQueue)> eventUpdate =
7452            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7453        pCB->eventUpdates.push_back(eventUpdate);
7454    }
7455    lock.unlock();
7456    if (!skipCall)
7457        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7458}
7459
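// Record the layout transitions requested by a batch of image memory barriers in the
// command buffer's per-subresource layout map. A subresource with no tracked layout yet
// has the barrier's (oldLayout, newLayout) pair recorded as first-seen; otherwise a
// mismatch between the barrier's oldLayout and the tracked layout is reported as an
// error, except when oldLayout is VK_IMAGE_LAYOUT_UNDEFINED, which is always accepted.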
7460static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7461                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7462    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7463    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7464    bool skip = false;
7465    uint32_t levelCount = 0;
7466    uint32_t layerCount = 0;
7467
7468    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7469        auto mem_barrier = &pImgMemBarriers[i];
7470        if (!mem_barrier)
7471            continue;
7472        // TODO: Do not iterate over every possibility - consolidate where
7473        // possible
7474        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7475
7476        for (uint32_t j = 0; j < levelCount; j++) {
7477            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7478            for (uint32_t k = 0; k < layerCount; k++) {
7479                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7480                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7481                IMAGE_CMD_BUF_LAYOUT_NODE node;
7482                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7483                    SetLayout(pCB, mem_barrier->image, sub,
7484                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7485                    continue;
7486                }
7487                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7488                    // TODO: Set memory invalid which is in mem_tracker currently
7489                } else if (node.layout != mem_barrier->oldLayout) {
7490                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7491                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7492                                                                                    "when current layout is %s.",
7493                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7494                }
7495                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7496            }
7497        }
7498    }
7499    return skip;
7500}
7501
7502// Print readable FlagBits in FlagMask
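// For example, an accessMask of (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT)
// yields "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]" (lowest bits first),
// and an accessMask of 0 yields "[None]".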
7503static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7504    std::string result;
7505    std::string separator;
7506
7507    if (accessMask == 0) {
7508        result = "[None]";
7509    } else {
7510        result = "[";
7511        for (uint32_t i = 0; i < 32; i++) {
7512            if (accessMask & (1u << i)) {
7513                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
7514                separator = " | ";
7515            }
7516        }
7517        result = result + "]";
7518    }
7519    return result;
7520}
7521
7522// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7523// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7524// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
7525static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7526                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7527                             const char *type) {
7528    bool skip_call = false;
7529
7530    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7531        if (accessMask & ~(required_bit | optional_bits)) {
7532            // TODO: Verify against Valid Use
7533            skip_call |=
7534                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7535                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7536                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7537        }
7538    } else {
7539        if (!required_bit) {
7540            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7541                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7542                                                                  "%s when layout is %s, unless the app has previously added a "
7543                                                                  "barrier for this transition.",
7544                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7545                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7546        } else {
7547            std::string opt_bits;
7548            if (optional_bits != 0) {
7549                std::stringstream ss;
7550                ss << optional_bits;
7551                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7552            }
7553            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7554                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7555                                                                  "layout is %s, unless the app has previously added a barrier for "
7556                                                                  "this transition.",
7557                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7558                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7559        }
7560    }
7561    return skip_call;
7562}
7563
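// For example, for VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL the mapping below requires
// VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT and treats VK_ACCESS_COLOR_ATTACHMENT_READ_BIT as
// optional, so a mask with both bits passes silently while READ alone draws the
// "missing required access bit" warning from ValidateMaskBits().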
7564static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7565                                        const VkImageLayout &layout, const char *type) {
7566    bool skip_call = false;
7567    switch (layout) {
7568    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7569        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7570                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7571        break;
7572    }
7573    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7574        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7575                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7576        break;
7577    }
7578    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7579        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7580        break;
7581    }
7582    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7583        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7584        break;
7585    }
7586    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7587        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7588                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7589        break;
7590    }
7591    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7592        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7593                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7594        break;
7595    }
7596    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7597        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7598        break;
7599    }
7600    case VK_IMAGE_LAYOUT_UNDEFINED: {
7601        if (accessMask != 0) {
7602            // TODO: Verify against Valid Use section spec
7603            skip_call |=
7604                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7605                        DRAWSTATE_INVALID_BARRIER, "DS", "%s accessMask 0x%X %s should be 0 when layout is %s.",
7606                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7607        }
7608        break;
7609    }
7610    case VK_IMAGE_LAYOUT_GENERAL:
7611    default: { break; }
7612    }
7613    return skip_call;
7614}
7615
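// Validate the barriers recorded by vkCmdWaitEvents/vkCmdPipelineBarrier: barriers inside
// a render pass require a self-dependency on the active subpass; image barriers must use
// queue family indices consistent with the image's sharing mode, access masks consistent
// with the old/new layouts, both aspect bits for combined depth/stencil formats, and
// in-range subresource layers/levels; buffer barriers are rejected inside a render pass
// and checked for valid queue family indices and an in-range offset/size.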
7616static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7617                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7618                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7619                             const VkImageMemoryBarrier *pImageMemBarriers) {
7620    bool skip_call = false;
7621    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7622    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7623    if (pCB->activeRenderPass && memBarrierCount) {
7624        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7625            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7626                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7627                                                                  "with no self dependency specified.",
7628                                 funcName, pCB->activeSubpass);
7629        }
7630    }
7631    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7632        auto mem_barrier = &pImageMemBarriers[i];
7633        auto image_data = getImageNode(dev_data, mem_barrier->image);
7634        if (image_data) {
7635            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7636            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7637            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7638                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7639                // be VK_QUEUE_FAMILY_IGNORED
7640                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7641                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7642                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7643                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
7644                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
7645                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7646                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7647                }
7648            } else {
7649                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7650                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7651                // or both be a valid queue family
7652                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7653                    (src_q_f_index != dst_q_f_index)) {
7654                    skip_call |=
7655                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7656                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7657                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7658                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7659                                                                     "must be.",
7660                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7661                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7662                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7663                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7664                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7665                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7666                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7667                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7668                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
7669                                         " queueFamilies created for this device.",
7670                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7671                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7672                }
7673            }
7674        }
7675
7676        if (mem_barrier) {
7677            skip_call |=
7678                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7679            skip_call |=
7680                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7681            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7682                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7683                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7684                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
7685                                     funcName);
7686            }
7687            auto image_data = getImageNode(dev_data, mem_barrier->image);
7688            VkFormat format = VK_FORMAT_UNDEFINED;
7689            uint32_t arrayLayers = 0, mipLevels = 0;
7690            bool imageFound = false;
7691            if (image_data) {
7692                format = image_data->createInfo.format;
7693                arrayLayers = image_data->createInfo.arrayLayers;
7694                mipLevels = image_data->createInfo.mipLevels;
7695                imageFound = true;
7696            } else if (dev_data->device_extensions.wsi_enabled) {
7697                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
7698                if (imageswap_data) {
7699                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
7700                    if (swapchain_data) {
7701                        format = swapchain_data->createInfo.imageFormat;
7702                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
7703                        mipLevels = 1;
7704                        imageFound = true;
7705                    }
7706                }
7707            }
7708            if (imageFound) {
7709                if (vk_format_is_depth_and_stencil(format) &&
7710                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7711                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
7712                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7713                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7714                                         "%s: Image is a depth and stencil format and thus must "
7715                                         "have both VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
7716                                         funcName);
7717                }
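                // NOTE: VK_REMAINING_ARRAY_LAYERS (and VK_REMAINING_MIP_LEVELS below) is
                // treated as a count of 1 for these range checks rather than being
                // resolved against the image's actual layer/level count.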
7718                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7719                                     ? 1
7720                                     : mem_barrier->subresourceRange.layerCount;
7721                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
7722                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7723                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7724                                         "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be "
7725                                         "less than or equal to the total number of layers (%d).",
7726                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
7727                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
7728                }
7729                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7730                                     ? 1
7731                                     : mem_barrier->subresourceRange.levelCount;
7732                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
7733                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7734                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
7735                                         "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be "
7736                                         "less than or equal to the total number of levels (%d).",
7737                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
7738                                         mem_barrier->subresourceRange.levelCount, mipLevels);
7739                }
7740            }
7741        }
7742    }
7743    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7744        auto mem_barrier = &pBufferMemBarriers[i];
7745        if (pCB->activeRenderPass) {
7746            skip_call |=
7747                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7748                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7749        }
7750        if (!mem_barrier)
7751            continue;
7752
7753        // Validate buffer barrier queue family indices
7754        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7755             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7756            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7757             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7758            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7759                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7760                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7761                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7762                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7763                                 dev_data->phys_dev_properties.queue_family_properties.size());
7764        }
7765
7766        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
7767        if (buffer_node) {
7768            VkDeviceSize buffer_size =
7769                (buffer_node->createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) ? buffer_node->createInfo.size : 0;
7770            if (mem_barrier->offset >= buffer_size) {
7771                skip_call |= log_msg(
7772                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7773                    DRAWSTATE_INVALID_BARRIER, "DS",
7774                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7775                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7776                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7777            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7778                skip_call |= log_msg(
7779                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7780                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7781                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7782                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7783                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7784                    reinterpret_cast<const uint64_t &>(buffer_size));
7785            }
7786        }
7787    }
7788    return skip_call;
7789}
7790
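// Submit-time check bound into pCB->eventUpdates by vkCmdWaitEvents below: verifies that
// the srcStageMask recorded with the wait equals the bitwise OR of the stageMasks the
// waited events were set with (optionally plus VK_PIPELINE_STAGE_HOST_BIT), preferring
// the queue's view of event state over the device-global eventMap.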
7791bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7792    bool skip_call = false;
7793    VkPipelineStageFlags stageMask = 0;
7794    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7795    for (uint32_t i = 0; i < eventCount; ++i) {
7796        auto event = pCB->events[firstEventIndex + i];
7797        auto queue_data = dev_data->queueMap.find(queue);
7798        if (queue_data == dev_data->queueMap.end())
7799            return false;
7800        auto event_data = queue_data->second.eventToStageMap.find(event);
7801        if (event_data != queue_data->second.eventToStageMap.end()) {
7802            stageMask |= event_data->second;
7803        } else {
7804            auto global_event_data = dev_data->eventMap.find(event);
7805            if (global_event_data == dev_data->eventMap.end()) {
7806                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7807                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7808                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7809                                     reinterpret_cast<const uint64_t &>(event));
7810            } else {
7811                stageMask |= global_event_data->second.stageMask;
7812            }
7813        }
7814    }
7815    // TODO: Need to validate that host_bit is only set if set event is called
7816    // but set event can be called at any time.
7817    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7818        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7819                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
7820                                                            "using srcStageMask 0x%x, which must equal the "
7821                                                            "bitwise OR of the stageMask parameters used in "
7822                                                            "calls to vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT "
7823                                                            "if the event was set with vkSetEvent), i.e. 0x%x.",
7824                             sourceStageMask, stageMask);
7825    }
7826    return skip_call;
7827}
7828
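// Illustrative usage that satisfies the check above (hypothetical application code, not
// part of this layer):
//     vkCmdSetEvent(cb, evt, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdWaitEvents(cb, 1, &evt, VK_PIPELINE_STAGE_TRANSFER_BIT /* srcStageMask */,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, NULL, 0, NULL, 0, NULL);
// At submit time srcStageMask equals the OR of the stageMasks used to set the waited
// events, so no DRAWSTATE_INVALID_EVENT error is reported.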
7829VKAPI_ATTR void VKAPI_CALL
7830CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7831              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7832              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7833              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7834    bool skipCall = false;
7835    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7836    std::unique_lock<std::mutex> lock(global_lock);
7837    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7838    if (pCB) {
7839        auto firstEventIndex = pCB->events.size();
7840        for (uint32_t i = 0; i < eventCount; ++i) {
7841            pCB->waitedEvents.insert(pEvents[i]);
7842            pCB->events.push_back(pEvents[i]);
7843        }
7844        std::function<bool(VkQueue)> eventUpdate =
7845            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7846        pCB->eventUpdates.push_back(eventUpdate);
7847        if (pCB->state == CB_RECORDING) {
7848            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7849        } else {
7850            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7851        }
7852        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7853        skipCall |=
7854            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7855                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7856    }
7857    lock.unlock();
7858    if (!skipCall)
7859        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7860                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7861                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7862}
7863
7864VKAPI_ATTR void VKAPI_CALL
7865CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7866                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7867                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7868                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7869    bool skipCall = false;
7870    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7871    std::unique_lock<std::mutex> lock(global_lock);
7872    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7873    if (pCB) {
7874        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7875        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7876        skipCall |=
7877            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7878                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7879    }
7880    lock.unlock();
7881    if (!skipCall)
7882        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7883                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7884                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7885}
7886
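// Deferred query-state update, mirrored into both the command buffer's and the queue's
// queryToStateMap: bound into pCB->queryUpdates with the queue as the open parameter and
// run at submit time, marking a query available (true) at vkCmdEndQuery and unavailable
// (false) at vkCmdResetQueryPool.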
7887bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
7888    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7889    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7890    if (pCB) {
7891        pCB->queryToStateMap[object] = value;
7892    }
7893    auto queue_data = dev_data->queueMap.find(queue);
7894    if (queue_data != dev_data->queueMap.end()) {
7895        queue_data->second.queryToStateMap[object] = value;
7896    }
7897    return false;
7898}
7899
7900VKAPI_ATTR void VKAPI_CALL
7901CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
7902    bool skipCall = false;
7903    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7904    std::unique_lock<std::mutex> lock(global_lock);
7905    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7906    if (pCB) {
7907        QueryObject query = {queryPool, slot};
7908        pCB->activeQueries.insert(query);
7909        if (!pCB->startedQueries.count(query)) {
7910            pCB->startedQueries.insert(query);
7911        }
7912        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
7913    }
7914    lock.unlock();
7915    if (!skipCall)
7916        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
7917}
7918
7919VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
7920    bool skipCall = false;
7921    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7922    std::unique_lock<std::mutex> lock(global_lock);
7923    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7924    if (pCB) {
7925        QueryObject query = {queryPool, slot};
7926        if (!pCB->activeQueries.count(query)) {
7927            skipCall |=
7928                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7929                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
7930                        (uint64_t)(queryPool), slot);
7931        } else {
7932            pCB->activeQueries.erase(query);
7933        }
7934        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
7935        pCB->queryUpdates.push_back(queryUpdate);
7936        if (pCB->state == CB_RECORDING) {
7937            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
7938        } else {
7939            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
7940        }
7941    }
7942    lock.unlock();
7943    if (!skipCall)
7944        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
7945}
7946
7947VKAPI_ATTR void VKAPI_CALL
7948CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
7949    bool skipCall = false;
7950    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7951    std::unique_lock<std::mutex> lock(global_lock);
7952    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7953    if (pCB) {
7954        for (uint32_t i = 0; i < queryCount; i++) {
7955            QueryObject query = {queryPool, firstQuery + i};
7956            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
7957            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
7958            pCB->queryUpdates.push_back(queryUpdate);
7959        }
7960        if (pCB->state == CB_RECORDING) {
7961            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
7962        } else {
7963            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
7964        }
7965        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
7966    }
7967    lock.unlock();
7968    if (!skipCall)
7969        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
7970}
7971
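// Submit-time check bound into pCB->queryUpdates by vkCmdCopyQueryPoolResults below:
// every query in the copied range must have been completed (its state set to true) on
// this queue, or failing that in the device-global queryToStateMap, before its results
// may be copied.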
7972bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
7973    bool skip_call = false;
7974    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
7975    auto queue_data = dev_data->queueMap.find(queue);
7976    if (queue_data == dev_data->queueMap.end())
7977        return false;
7978    for (uint32_t i = 0; i < queryCount; i++) {
7979        QueryObject query = {queryPool, firstQuery + i};
7980        auto query_data = queue_data->second.queryToStateMap.find(query);
7981        bool fail = false;
7982        if (query_data != queue_data->second.queryToStateMap.end()) {
7983            if (!query_data->second) {
7984                fail = true;
7985            }
7986        } else {
7987            auto global_query_data = dev_data->queryToStateMap.find(query);
7988            if (global_query_data != dev_data->queryToStateMap.end()) {
7989                if (!global_query_data->second) {
7990                    fail = true;
7991                }
7992            } else {
7993                fail = true;
7994            }
7995        }
7996        if (fail) {
7997            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7998                                 DRAWSTATE_INVALID_QUERY, "DS",
7999                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8000                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8001        }
8002    }
8003    return skip_call;
8004}
8005
VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
    // Validate that DST buffer has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    if (pCB) {
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, pCB, queryPool, queryCount, firstQuery);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

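// Validate vkCmdPushConstants: the update must lie within the device's push constant limits
// and within the union of the pipeline layout's ranges whose stageFlags match exactly.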
VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check if the push constant update is within any of the ranges with the same stage flags specified in the pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    if (!pipeline_layout) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
                            (uint64_t)layout);
    } else {
        // Coalesce adjacent/overlapping pipeline ranges before checking whether the incoming range is
        // contained in the pipeline ranges.
        // Build a {start, end} span list for ranges with matching stage flags.
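        // Example (for illustration): ranges {offset 0, size 16} and {offset 8, size 24} with
        // identical stageFlags coalesce into the single span [0, 32), so an update at offset 4
        // with size 20 is accepted even though neither declared range alone contains it.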
        const auto &ranges = pipeline_layout->pushConstantRanges;
        struct span {
            uint32_t start;
            uint32_t end;
        };
        std::vector<span> spans;
        spans.reserve(ranges.size());
        for (const auto &iter : ranges) {
            if (iter.stageFlags == stageFlags) {
                spans.push_back({iter.offset, iter.offset + iter.size});
            }
        }
        if (spans.empty()) {
            // There were no ranges that matched the stageFlags.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
                                (uint32_t)stageFlags, (uint64_t)layout);
        } else {
            // Sort span list by start value.
            struct comparer {
                bool operator()(struct span i, struct span j) { return i.start < j.start; }
            } my_comparer;
            std::sort(spans.begin(), spans.end(), my_comparer);

            // Examine two spans at a time.
            std::vector<span>::iterator current = spans.begin();
            std::vector<span>::iterator next = current + 1;
            while (next != spans.end()) {
                if (current->end < next->start) {
                    // There is a gap; cannot coalesce. Move to the next two spans.
                    ++current;
                    ++next;
                } else {
                    // Coalesce the two spans.  The start of the next span
                    // is within the current span, so pick the larger of
                    // the end values to extend the current span.
                    // Then delete the next span and set next to the span after it.
                    current->end = max(current->end, next->end);
                    next = spans.erase(next);
                }
            }

            // Now we can check if the incoming range is within any of the spans.
            bool contained_in_a_range = false;
            for (uint32_t i = 0; i < spans.size(); ++i) {
                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
                    contained_in_a_range = true;
                    break;
                }
            }
            if (!contained_in_a_range) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                    "vkCmdPushConstants() Push constant range [%d, %d) "
                                    "with stageFlags = 0x%" PRIx32 " "
                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
            }
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

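// Validate vkCmdWriteTimestamp and defer marking the query slot available to queue-submit time.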
VKAPI_ATTR void VKAPI_CALL
CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

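// Create the framebuffer, then shadow its create info (including a deep copy of the attachment
// list) and record each attachment's image and memory binding for later validation.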
VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        std::lock_guard<std::mutex> lock(global_lock);

        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
        fbNode.createInfo = *pCreateInfo;
        if (pCreateInfo->pAttachments) {
            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
            memcpy(attachments, pCreateInfo->pAttachments, pCreateInfo->attachmentCount * sizeof(VkImageView));
            fbNode.createInfo.pAttachments = attachments;
        }
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = getImageViewData(dev_data, view);
            if (!view_data) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->image;
            fbNode.attachments.push_back(fb_info);
        }
    }
    return result;
}

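// Search the subpass DAG backwards from 'index' for 'dependent'; returns true if a dependency
// path exists. 'processed_nodes' guards against revisiting nodes already checked.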
static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}

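// Verify that 'subpass' has an explicit or implicit dependency with every subpass in
// 'dependent_subpasses' that shares an attachment with it; log an error for each miss.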
static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit dependency still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

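// Recursively walk earlier subpasses; if any of them writes 'attachment', every subpass between
// the write and the read must list the attachment in its pPreserveAttachments.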
static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as subsequent nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

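// Half-open interval overlap helpers used to detect aliasing between attachment subresource
// ranges and between memory bindings.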
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // The half-open intervals [offset1, offset1 + size1) and [offset2, offset2 + size2) overlap
    // iff each one starts before the other one ends; this also covers the cases where one range
    // fully contains, or exactly equals, the other.
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

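// Validate subpass dependencies for a (framebuffer, renderpass) pair: detect aliased attachments,
// require MAY_ALIAS flags on them, and make sure every attachment shared between subpasses is
// covered by a dependency and preserved where required.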
static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const *framebuffer,
                                 RENDER_PASS_NODE const *renderPass) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = getImageViewData(my_data, viewi);
            auto view_data_j = getImageViewData(my_data, viewj);
            if (!view_data_i || !view_data_j) {
                continue;
            }
            if (view_data_i->image == view_data_j->image &&
                isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = getImageNode(my_data, view_data_i->image);
            auto image_data_j = getImageNode(my_data, view_data_j->image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
                                                                             image_data_j->memOffset, image_data_j->memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue; // Unused references would otherwise index out of bounds below
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
                            attachment, i);
            }
        }
    }
    // Where a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is
    // preserved by every pass between the one that wrote it and this one.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the subpasses of a renderpass. The initial check makes sure that
// READ_ONLY layout attachments don't have CLEAR as their loadOp.
static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
                                                  const uint32_t attachment,
                                                  const VkAttachmentDescription &attachment_description) {
    bool skip_call = false;
    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
            skip_call |=
                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
        }
    }
    return skip_call;
}

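// Validate the attachment reference layouts declared by each subpass: input attachments should
// be READ_ONLY_OPTIMAL, color attachments COLOR_ATTACHMENT_OPTIMAL, and depth/stencil
// DEPTH_STENCIL_ATTACHMENT_OPTIMAL; GENERAL is legal but flagged as a performance warning.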
static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto attach_index = subpass.pInputAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue; // Nothing to validate; indexing pAttachments with UNUSED would read out of bounds
            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
                }
            }
            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
                                                          pCreateInfo->pAttachments[attach_index]);
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            auto attach_index = subpass.pColorAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;
            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
                }
            }
            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
                                                          pCreateInfo->pAttachments[attach_index]);
        }
        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
                }
            }
            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
        }
    }
    return skip;
}

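// Build the subpass DAG from the declared dependencies and record which subpasses carry a
// self-dependency; rejects backward dependencies and external-to-external dependencies.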
static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }
        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}

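// Run the SPIRV-Tools validator over the module before handing it to the driver, and shadow
// the create info for later shader validation.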
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkShaderModule *pShaderModule) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
    spv_diagnostic diag = nullptr;

    auto result = spvValidate(ctx, &binary, &diag);
    if (result != SPV_SUCCESS) {
        skip_call |= log_msg(my_data->report_data,
                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VkDebugReportObjectTypeEXT(0), 0,
                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
                             diag && diag->error ? diag->error : "(no error text)");
    }

    spvDiagnosticDestroy(diag);
    spvContextDestroy(ctx);

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}

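// Validate the render pass description (DAG ordering, attachment layouts), then shadow the
// entire create info, including each subpass's attachment reference arrays, so state can be
// validated after the application's pointers go away.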
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator,
                                                VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Create DAG
    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
        // Validate
        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
        if (skip_call) {
            return VK_ERROR_VALIDATION_FAILED_EXT;
        }
    }
    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        // TODOSC : Merge in tracking of renderpass from shader_checker
        // Shadow create info and store in map
        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
        }
        if (pCreateInfo->pSubpasses) {
            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));

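            // Deep-copy each subpass's attachment references into one contiguous block per
            // subpass, advancing 'attachments' past each section as it is filled in. The block
            // is freed later by deleteRenderPasses via the first non-null section pointer.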
            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
                const uint32_t attachmentCount = subpass->inputAttachmentCount +
                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];

                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
                subpass->pInputAttachments = attachments;
                attachments += subpass->inputAttachmentCount;

                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                subpass->pColorAttachments = attachments;
                attachments += subpass->colorAttachmentCount;

                if (subpass->pResolveAttachments) {
                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                    subpass->pResolveAttachments = attachments;
                    attachments += subpass->colorAttachmentCount;
                }

                if (subpass->pDepthStencilAttachment) {
                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
                    subpass->pDepthStencilAttachment = attachments;
                    attachments += 1;
                }

                // Preserve attachments are bare uint32_t indices rather than VkAttachmentReference
                // structs, so copy and read them back with a uint32_t stride.
                auto preserveAttachments = reinterpret_cast<uint32_t *>(attachments);
                memcpy(preserveAttachments, subpass->pPreserveAttachments,
                       sizeof(uint32_t) * subpass->preserveAttachmentCount);
                subpass->pPreserveAttachments = preserveAttachments;
            }
        }
        if (pCreateInfo->pDependencies) {
            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
        }

        auto render_pass = new RENDER_PASS_NODE(localRPCI);
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;
#if MTMERGESOURCE
        // MTMTODO : Merge with code from above to eliminate duplication
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
            MT_PASS_ATTACHMENT_INFO pass_info;
            pass_info.load_op = desc.loadOp;
            pass_info.store_op = desc.storeOp;
            pass_info.attachment = i;
            render_pass->attachments.push_back(pass_info);
        }
        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
            }
            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
                uint32_t attachment = subpass.pPreserveAttachments[j];
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Preserve attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                }
            }
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment;
                if (subpass.pResolveAttachments) {
                    attachment = subpass.pResolveAttachments[j].attachment;
                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                             "Resolve attachment %d must be less than the total number of attachments %d.",
                                             attachment, pCreateInfo->attachmentCount);
                        continue;
                    }
                }
                attachment = subpass.pColorAttachments[j].attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Color attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Depth stencil attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Input attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, true));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
            }
        }
#endif
        {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->renderPassMap[*pRenderPass] = render_pass;
        }
    }
    return result;
}
// Free the renderpass shadow
static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a single block, so we just need to
                //  find the first non-null pointer to delete the whole block
                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
                }
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
        delete[] pRenderPassInfo->pDependencies;
        delete pRenderPassInfo;
        delete (*ii).second;
    }
    my_data->renderPassMap.clear();
}

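// At vkCmdBeginRenderPass time, check that each framebuffer attachment's tracked layout matches
// the initialLayout declared in the render pass, seeding layout state for untracked subresources.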
static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
                                                  const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = getImageViewData(dev_data, image_view);
        assert(image_data);
        const VkImage &image = image_data->image;
        const VkImageSubresourceRange &subRange = image_data->subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
                                                                    "where the initial layout is %s and the layout of the "
                                                                    "attachment at the start of the render pass is %s. The "
                                                                    "layouts must match.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

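// Record the layout transitions a subpass performs on its input, color, and depth/stencil
// attachments into the command buffer's layout map.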
static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    const VkFramebufferCreateInfo &framebufferInfo = framebuffer->createInfo;
    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (subpass.pInputAttachments[j].attachment == VK_ATTACHMENT_UNUSED)
            continue; // Unused references have no image view to transition
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
    }
}

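// Commands such as vkCmdBeginRenderPass are only legal in primary command buffers.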
static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

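// At vkCmdEndRenderPass time, record each attachment's transition to its declared finalLayout.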
static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
                                          const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

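// The renderArea must lie entirely within the framebuffer's dimensions.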
static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}

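// Validate vkCmdBeginRenderPass: queue deferred memory-validity checks driven by each
// attachment's loadOp, verify clear counts, render area, layouts, and subpass dependencies,
// then record the new render pass state on the command buffer.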
8879VKAPI_ATTR void VKAPI_CALL
8880CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
8881    bool skipCall = false;
8882    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8883    std::unique_lock<std::mutex> lock(global_lock);
8884    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8885    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
8886    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
8887    if (pCB) {
8888        if (renderPass) {
8889            uint32_t clear_op_count = 0;
8890            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
8891            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
8892                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
8893                if (renderPass->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8894                    ++clear_op_count;
8895                    std::function<bool()> function = [=]() {
8896                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
8897                        return false;
8898                    };
8899                    pCB->validate_functions.push_back(function);
8900                } else if (renderPass->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
8901                    std::function<bool()> function = [=]() {
8902                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
8903                        return false;
8904                    };
8905                    pCB->validate_functions.push_back(function);
8906                } else if (renderPass->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
8907                    std::function<bool()> function = [=]() {
8908                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
8909                    };
8910                    pCB->validate_functions.push_back(function);
8911                }
8912                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
8913                    std::function<bool()> function = [=]() {
8914                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
8915                    };
8916                    pCB->validate_functions.push_back(function);
8917                }
8918            }
8919            if (clear_op_count > pRenderPassBegin->clearValueCount) {
8920                skipCall |= log_msg(
8921                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8922                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8923                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but the actual number "
8924                    "of attachments in renderPass 0x%" PRIx64 " that use VK_ATTACHMENT_LOAD_OP_CLEAR is %u. The clearValueCount "
8925                                                              "must therefore be greater than or equal to %u.",
8926                    pRenderPassBegin->clearValueCount, reinterpret_cast<uint64_t &>(renderPass), clear_op_count, clear_op_count);
8927            }
8928            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
8929            skipCall |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
8930            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
8931            skipCall |= ValidateDependencies(dev_data, framebuffer, renderPass);
8932            pCB->activeRenderPass = renderPass;
8933            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
8934            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
8935            // This is a shallow copy as that is all that is needed for now
8936            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
8937            pCB->activeSubpass = 0;
8938            pCB->activeSubpassContents = contents;
8939            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
8940            // Connect this framebuffer to this cmdBuffer
8941            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
8942        } else {
8943            skipCall |=
8944                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "You cannot use a NULL or invalid RenderPass or Framebuffer object in vkCmdBeginRenderPass()");
8946        }
8947    }
8948    lock.unlock();
8949    if (!skipCall) {
8950        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
8951    }
8952}
8953
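// Validate and record vkCmdNextSubpass, advancing the CB's active subpass and applying that subpass's layout transitions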
8954VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
8955    bool skipCall = false;
8956    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8957    std::unique_lock<std::mutex> lock(global_lock);
8958    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8959    if (pCB) {
8960        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
8961        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
8962        pCB->activeSubpass++;
8963        pCB->activeSubpassContents = contents;
8964        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
8965        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
8966    }
8967    lock.unlock();
8968    if (!skipCall)
8969        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
8970}
8971
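// Validate and record vkCmdEndRenderPass. Attachment storeOps queue memory-validity updates (STORE marks
// memory valid, DONT_CARE marks it invalid), final layouts are transitioned, and the CB's active render pass
// state is cleared.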
8972VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
8973    bool skipCall = false;
8974    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8975    std::unique_lock<std::mutex> lock(global_lock);
8976    auto pCB = getCBNode(dev_data, commandBuffer);
8977    if (pCB) {
8978        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
8979        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
        if (pRPNode && framebuffer) {
8981            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
8982                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
8983                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
8984                    std::function<bool()> function = [=]() {
8985                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
8986                        return false;
8987                    };
8988                    pCB->validate_functions.push_back(function);
8989                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
8990                    std::function<bool()> function = [=]() {
8991                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
8992                        return false;
8993                    };
8994                    pCB->validate_functions.push_back(function);
8995                }
8996            }
8997        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
8999        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9000        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9001        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
9002        pCB->activeRenderPass = nullptr;
9003        pCB->activeSubpass = 0;
9004        pCB->activeFramebuffer = VK_NULL_HANDLE;
9005    }
9006    lock.unlock();
9007    if (!skipCall)
9008        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9009}
9010
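// Shared error report for an attachment mismatch found while checking secondary/primary render pass compatibility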
9011static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9012                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9013                                        const char *msg) {
9014    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9015                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9016                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                   " that is not compatible with the current render pass 0x%" PRIx64 ". "
9018                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9019                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
9020                   msg);
9021}
9022
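// Two attachment references are compatible if both are VK_ATTACHMENT_UNUSED, or if they match in format and
// sample count (and in flags when the render pass has multiple subpasses). Out-of-range indices count as unused.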
9023static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9024                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9025                                            uint32_t secondaryAttach, bool is_multi) {
9026    bool skip_call = false;
9027    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
9028        primaryAttach = VK_ATTACHMENT_UNUSED;
9029    }
9030    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
9031        secondaryAttach = VK_ATTACHMENT_UNUSED;
9032    }
9033    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9034        return skip_call;
9035    }
9036    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9037        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9038                                                 secondaryAttach, "The first is unused while the second is not.");
9039        return skip_call;
9040    }
9041    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9042        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9043                                                 secondaryAttach, "The second is unused while the first is not.");
9044        return skip_call;
9045    }
9046    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
9047        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
9048        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9049                                                 secondaryAttach, "They have different formats.");
9050    }
9051    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
9052        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
9053        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9054                                                 secondaryAttach, "They have different samples.");
9055    }
9056    if (is_multi &&
9057        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
9058            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
9059        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9060                                                 secondaryAttach, "They have different flags.");
9061    }
9062    return skip_call;
9063}
9064
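// Compare one subpass of two render passes: every input, color, resolve, and depth/stencil attachment
// reference is checked pairwise, treating a missing reference as VK_ATTACHMENT_UNUSED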
9065static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9066                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
9067                                         bool is_multi) {
9068    bool skip_call = false;
9069    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
9070    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
9071    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9072    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9073        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9074        if (i < primary_desc.inputAttachmentCount) {
9075            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9076        }
9077        if (i < secondary_desc.inputAttachmentCount) {
9078            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9079        }
9080        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9081                                                     secondaryPass, secondary_input_attach, is_multi);
9082    }
9083    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9084    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9085        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9086        if (i < primary_desc.colorAttachmentCount) {
9087            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9088        }
9089        if (i < secondary_desc.colorAttachmentCount) {
9090            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9091        }
9092        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9093                                                     secondaryPass, secondary_color_attach, is_multi);
9094        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9095        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9096            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9097        }
9098        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9099            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9100        }
9101        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9102                                                     secondaryPass, secondary_resolve_attach, is_multi);
9103    }
9104    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9105    if (primary_desc.pDepthStencilAttachment) {
9106        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9107    }
9108    if (secondary_desc.pDepthStencilAttachment) {
9109        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9110    }
9111    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9112                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9113    return skip_call;
9114}
9115
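// Render passes are compatible for vkCmdExecuteCommands if the handles are identical, or if both exist,
// have the same subpassCount, and every subpass passes the attachment compatibility checks above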
9116static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9117                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9118    bool skip_call = false;
9119    // Early exit if renderPass objects are identical (and therefore compatible)
9120    if (primaryPass == secondaryPass)
9121        return skip_call;
9122    auto primary_render_pass = getRenderPass(dev_data, primaryPass);
9123    auto secondary_render_pass = getRenderPass(dev_data, secondaryPass);
9124    if (!primary_render_pass) {
9125        skip_call |=
9126            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9127                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9128                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9129                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9130        return skip_call;
9131    }
9132    if (!secondary_render_pass) {
9133        skip_call |=
9134            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9135                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9136                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9137                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9138        return skip_call;
9139    }
9140    if (primary_render_pass->pCreateInfo->subpassCount != secondary_render_pass->pCreateInfo->subpassCount) {
9141        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9142                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9143                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                             " that is not compatible with the current render pass 0x%" PRIx64 ". "
9145                             "They have a different number of subpasses.",
9146                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9147        return skip_call;
9148    }
9149    auto subpassCount = primary_render_pass->pCreateInfo->subpassCount;
9150    for (uint32_t i = 0; i < subpassCount; ++i) {
9151        skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primary_render_pass, secondaryBuffer,
9152                                                  secondary_render_pass, i, subpassCount > 1);
9153    }
9154    return skip_call;
9155}
9156
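// If the secondary CB inherited a framebuffer, it must be the primary CB's active framebuffer, and that
// framebuffer's render pass must be compatible w/ the render pass the secondary CB inherited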
9157static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9158                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9159    bool skip_call = false;
9160    if (!pSubCB->beginInfo.pInheritanceInfo) {
9161        return skip_call;
9162    }
9163    VkFramebuffer primary_fb = pCB->activeFramebuffer;
9164    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9165    if (secondary_fb != VK_NULL_HANDLE) {
9166        if (primary_fb != secondary_fb) {
9167            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9168                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9169                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
9170                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
9171                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9172        }
9173        auto fb = getFramebuffer(dev_data, secondary_fb);
9174        if (!fb) {
9175            skip_call |=
9176                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9177                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9178                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
9179                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9180            return skip_call;
9181        }
        // Both handles here belong to the secondary CB: its framebuffer's render pass must be compatible
        // w/ the render pass the secondary CB inherited
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9184    }
9185    return skip_call;
9186}
9187
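// Query checks for executing a secondary CB: the secondary's inherited pipelineStatistics must be a subset of
// the statistics enabled on any active pipeline-statistics query pool, and the secondary must not have started
// a query of a type that is already active on the primary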
9188static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9189    bool skipCall = false;
9190    unordered_set<int> activeTypes;
9191    for (auto queryObject : pCB->activeQueries) {
9192        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9193        if (queryPoolData != dev_data->queryPoolMap.end()) {
9194            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9195                pSubCB->beginInfo.pInheritanceInfo) {
9196                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9197                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9198                    skipCall |= log_msg(
9199                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9200                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9201                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9202                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics is being queried so the command "
                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried, so the "
                        "secondary Cmd Buffer's inherited pipelineStatistics must be a subset of the query pool's statistics.",
9205                }
9206            }
9207            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9208        }
9209    }
9210    for (auto queryObject : pSubCB->startedQueries) {
9211        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9212        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9213            skipCall |=
9214                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9215                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9216                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
9218                        "secondary Cmd Buffer 0x%p.",
9219                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9220                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9221        }
9222    }
9223    return skipCall;
9224}
9225
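// Validate and record vkCmdExecuteCommands. Each element of pCommandBuffers must be a known secondary CB,
// must satisfy the RENDER_PASS_CONTINUE / render pass / framebuffer compatibility rules when executed inside
// a render pass, and is then linked to the primary CB and marked in-flight.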
9226VKAPI_ATTR void VKAPI_CALL
9227CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9228    bool skipCall = false;
9229    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9230    std::unique_lock<std::mutex> lock(global_lock);
9231    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9232    if (pCB) {
9233        GLOBAL_CB_NODE *pSubCB = NULL;
9234        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9235            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9236            if (!pSubCB) {
9237                skipCall |=
9238                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9239                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9240                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
                // Every check below dereferences pSubCB, so skip this unrecognized command buffer
                continue;
9242            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9243                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9244                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9245                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9246                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9247                                    (void *)pCommandBuffers[i], i);
9248            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9249                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9250                    skipCall |= log_msg(
9251                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9252                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9253                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
9254                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9255                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
9256                } else {
                    // Make sure render pass is compatible w/ parent CB's render pass when CONTINUE bit is set
9258                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
9259                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9260                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9261                }
9262                string errorString = "";
9263                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
9264                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9265                    skipCall |= log_msg(
9266                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9267                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9268                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
9269                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
9270                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9271                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
9272                }
9273                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9274                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9275                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9276                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9277                        skipCall |= log_msg(
9278                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9279                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9280                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
9281                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
9282                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9283                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
9284                    }
9285                }
9286            }
9287            // TODO(mlentine): Move more logic into this method
9288            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9289            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution from the time they are recorded
9292            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9293                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9294                    skipCall |= log_msg(
9295                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9296                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9297                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9298                        "set!",
9299                        (uint64_t)(pCB->commandBuffer));
9300                }
9301                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9302                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9303                    skipCall |= log_msg(
9304                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9305                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9306                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
9307                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9308                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9309                                          "set, even though it does.",
9310                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9311                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9312                }
9313            }
9314            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9315                skipCall |=
9316                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9317                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9318                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%" PRIxLEAST64 ") cannot be executed with a query in "
                            "flight when inherited queries are not "
                            "supported on this device.",
9322                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9323            }
9324            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9325            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9326            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9327        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9330    }
9331    lock.unlock();
9332    if (!skipCall)
9333        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9334}
9335
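// If this memory backs an image, it can only be mapped while all of the image's known subresources are in
// GENERAL or PREINITIALIZED layout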
9336static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9337    bool skip_call = false;
9338    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9339    auto mem_info = getMemObjInfo(dev_data, mem);
9340    if ((mem_info) && (mem_info->image != VK_NULL_HANDLE)) {
9341        std::vector<VkImageLayout> layouts;
9342        if (FindLayouts(dev_data, mem_info->image, layouts)) {
9343            for (auto layout : layouts) {
9344                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9345                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9346                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9347                                                                                         "GENERAL or PREINITIALIZED are supported.",
9348                                         string_VkImageLayout(layout));
9349                }
9350            }
9351        }
9352    }
9353    return skip_call;
9354}
9355
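// Validate vkMapMemory: the memory must be HOST_VISIBLE, the requested range must fit in the allocation, and
// any backing image must be in a mappable layout. On success the mapped range is recorded for later
// Flush/Invalidate validation.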
9356VKAPI_ATTR VkResult VKAPI_CALL
9357MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9358    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9359
9360    bool skip_call = false;
9361    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9362    std::unique_lock<std::mutex> lock(global_lock);
9363#if MTMERGESOURCE
9364    DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
9365    if (pMemObj) {
9366        pMemObj->valid = true;
9367        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9368             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9369            skip_call =
9370                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9371                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9372                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
9373        }
9374    }
9375    skip_call |= validateMemRange(dev_data, mem, offset, size);
9376#endif
9377    skip_call |= ValidateMapImageLayouts(device, mem);
9378    lock.unlock();
9379
9380    if (!skip_call) {
9381        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9382        if (VK_SUCCESS == result) {
9383#if MTMERGESOURCE
9384            lock.lock();
9385            storeMemRanges(dev_data, mem, offset, size);
9386            initializeAndTrackMemory(dev_data, mem, size, ppData);
9387            lock.unlock();
9388#endif
9389        }
9390    }
9391    return result;
9392}
9393
9394VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9395    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9396    bool skipCall = false;
9397
9398    std::unique_lock<std::mutex> lock(global_lock);
9399    skipCall |= deleteMemRanges(my_data, mem);
9400    lock.unlock();
9401    if (!skipCall) {
9402        my_data->device_dispatch_table->UnmapMemory(device, mem);
9403    }
9404}
9405
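// Verify each VkMappedMemoryRange handed to Flush/Invalidate lies within the range actually mapped on that
// memory object (offset not below the mapped offset, end not past the mapped end)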
9406static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9407                                   const VkMappedMemoryRange *pMemRanges) {
9408    bool skipCall = false;
9409    for (uint32_t i = 0; i < memRangeCount; ++i) {
9410        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9411        if (mem_info) {
9412            if (mem_info->memRange.offset > pMemRanges[i].offset) {
9413                skipCall |=
9414                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9415                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9416                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9417                            "(" PRINTF_SIZE_T_SPECIFIER ").",
9418                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->memRange.offset));
9419            }
9420
9421            const uint64_t my_dataTerminus =
9422                    (mem_info->memRange.size == VK_WHOLE_SIZE) ? mem_info->allocInfo.allocationSize :
9423                                                                           (mem_info->memRange.offset + mem_info->memRange.size);
9424            if (pMemRanges[i].size != VK_WHOLE_SIZE && (my_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
9425                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9426                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9427                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9428                                                                 ") exceeds the Memory Object's upper-bound "
9429                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9430                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9431                                    static_cast<size_t>(my_dataTerminus));
9432            }
9433        }
9434    }
9435    return skipCall;
9436}
9437
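// For noncoherent memory the layer tracks a shadow allocation of 2 * size bytes (set up at map time, per the
// checks below): guard bytes filled w/ NoncoherentMemoryFillValue in [0, size/2) and [size + size/2, 2 * size),
// w/ the app-visible data in between. A modified guard byte means the app wrote outside its mapped range;
// otherwise the app data is copied through to the driver's pointer.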
9438static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9439                                                     const VkMappedMemoryRange *pMemRanges) {
9440    bool skipCall = false;
9441    for (uint32_t i = 0; i < memRangeCount; ++i) {
9442        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9443        if (mem_info) {
9444            if (mem_info->pData) {
9445                VkDeviceSize size = mem_info->memRange.size;
9446                VkDeviceSize half_size = (size / 2);
9447                char *data = static_cast<char *>(mem_info->pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
9449                    if (data[j] != NoncoherentMemoryFillValue) {
9450                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9451                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM",
                                            "Memory underflow (write before start of mapped range) was detected on mem obj 0x%" PRIxLEAST64,
9453                                            (uint64_t)pMemRanges[i].memory);
9454                    }
9455                }
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
9457                    if (data[j] != NoncoherentMemoryFillValue) {
9458                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9459                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM",
                                            "Memory overflow (write past end of mapped range) was detected on mem obj 0x%" PRIxLEAST64,
9461                                            (uint64_t)pMemRanges[i].memory);
9462                    }
9463                }
9464                memcpy(mem_info->pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9465            }
9466        }
9467    }
9468    return skipCall;
9469}
9470
9471VkResult VKAPI_CALL
9472FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9473    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9474    bool skipCall = false;
9475    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9476
9477    std::unique_lock<std::mutex> lock(global_lock);
9478    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9479    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9480    lock.unlock();
9481    if (!skipCall) {
9482        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9483    }
9484    return result;
9485}
9486
9487VkResult VKAPI_CALL
9488InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9489    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9490    bool skipCall = false;
9491    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9492
9493    std::unique_lock<std::mutex> lock(global_lock);
9494    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9495    lock.unlock();
9496    if (!skipCall) {
9497        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9498    }
9499    return result;
9500}
9501
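// vkBindImageMemory: record the image->memory binding, then validate the new image range against the
// allocation's existing buffer ranges (aliasing check) and cache the image's offset and size requirements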
9502VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9503    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9504    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9505    bool skipCall = false;
9506    std::unique_lock<std::mutex> lock(global_lock);
9507    auto image_node = getImageNode(dev_data, image);
9508    if (image_node) {
9509        // Track objects tied to memory
9510        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9511        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9512        VkMemoryRequirements memRequirements;
9513        lock.unlock();
9514        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9515        lock.lock();
9516
9517        // Track and validate bound memory range information
9518        auto mem_info = getMemObjInfo(dev_data, mem);
9519        if (mem_info) {
9520            const MEMORY_RANGE range =
9521                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, mem_info->imageRanges);
9522            skipCall |= validate_memory_range(dev_data, mem_info->bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9523        }
9524
9525        print_mem_list(dev_data);
9526        lock.unlock();
9527        if (!skipCall) {
9528            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9529            lock.lock();
9530            dev_data->memObjMap[mem].get()->image = image;
9531            image_node->mem = mem;
9532            image_node->memOffset = memoryOffset;
9533            image_node->memSize = memRequirements.size;
9534            lock.unlock();
9535        }
9536    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", was it already destroyed?",
                reinterpret_cast<const uint64_t &>(image));
9541    }
9542    return result;
9543}
9544
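// vkSetEvent: a host-side set is visible to all queues immediately, so update every queue's eventToStageMap;
// also flag an error if the event is still in use by a recorded command buffer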
9545VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9546    bool skip_call = false;
9547    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9548    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9549    std::unique_lock<std::mutex> lock(global_lock);
9550    auto event_node = dev_data->eventMap.find(event);
9551    if (event_node != dev_data->eventMap.end()) {
9552        event_node->second.needsSignaled = false;
9553        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9554        if (event_node->second.write_in_use) {
9555            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9556                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9557                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
9558                                 reinterpret_cast<const uint64_t &>(event));
9559        }
9560    }
9561    lock.unlock();
9562    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9563    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9564    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9565    for (auto queue_data : dev_data->queueMap) {
9566        auto event_entry = queue_data.second.eventToStageMap.find(event);
9567        if (event_entry != queue_data.second.eventToStageMap.end()) {
9568            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9569        }
9570    }
9571    if (!skip_call)
9572        result = dev_data->device_dispatch_table->SetEvent(device, event);
9573    return result;
9574}
9575
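// Validate vkQueueBindSparse: the fence must not already be in use, each sparse buffer/image bind is recorded
// as a memory binding, and wait/signal semaphores get the same forward-progress checks as queue submission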
9576VKAPI_ATTR VkResult VKAPI_CALL
9577QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
9578    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9579    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9580    bool skip_call = false;
9581    std::unique_lock<std::mutex> lock(global_lock);
9582    auto pFence = getFenceNode(dev_data, fence);
9583    auto pQueue = getQueueNode(dev_data, queue);
9584
9585    // First verify that fence is not in use
9586    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
9587
9588    if (fence != VK_NULL_HANDLE) {
9589        SubmitFence(pQueue, pFence);
9590    }
9591
9592    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9593        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9594        // Track objects tied to memory
9595        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9596            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9597                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
9598                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9599                                           "vkQueueBindSparse"))
9600                    skip_call = true;
9601            }
9602        }
9603        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9604            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9605                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
9606                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9607                                           "vkQueueBindSparse"))
9608                    skip_call = true;
9609            }
9610        }
9611        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9612            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9613                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
9614                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9615                                           "vkQueueBindSparse"))
9616                    skip_call = true;
9617            }
9618        }
9619        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9620            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
9621            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9622                if (dev_data->semaphoreMap[semaphore].signaled) {
9623                    dev_data->semaphoreMap[semaphore].signaled = false;
9624                } else {
9625                    skip_call |=
9626                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9627                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9628                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
9629                                " that has no way to be signaled.",
9630                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9631                }
9632            }
9633        }
9634        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9635            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
9636            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9637                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skip_call |=
9639                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9640                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9641                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
9642                                ", but that semaphore is already signaled.",
9643                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9644                }
9645                dev_data->semaphoreMap[semaphore].signaled = true;
9646            }
9647        }
9648    }
9649    print_mem_list(dev_data);
9650    lock.unlock();
9651
9652    if (!skip_call)
9653        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
9654
9655    return result;
9656}
9657
9658VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
9659                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
9660    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9661    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
9662    if (result == VK_SUCCESS) {
9663        std::lock_guard<std::mutex> lock(global_lock);
9664        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
9665        sNode->signaled = false;
9666        sNode->queue = VK_NULL_HANDLE;
9667        sNode->in_use.store(0);
9668    }
9669    return result;
9670}
9671
9672VKAPI_ATTR VkResult VKAPI_CALL
9673CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
9674    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9675    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
9676    if (result == VK_SUCCESS) {
9677        std::lock_guard<std::mutex> lock(global_lock);
9678        dev_data->eventMap[*pEvent].needsSignaled = false;
9679        dev_data->eventMap[*pEvent].in_use.store(0);
9680        dev_data->eventMap[*pEvent].write_in_use = 0;
9681        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
9682    }
9683    return result;
9684}
9685
9686VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
9687                                                  const VkAllocationCallbacks *pAllocator,
9688                                                  VkSwapchainKHR *pSwapchain) {
9689    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9690    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
9691
9692    if (VK_SUCCESS == result) {
9693        std::lock_guard<std::mutex> lock(global_lock);
9694        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
9695    }
9696
9697    return result;
9698}
9699
9700VKAPI_ATTR void VKAPI_CALL
9701DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
9702    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9703    bool skipCall = false;
9704
9705    std::unique_lock<std::mutex> lock(global_lock);
9706    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
9707    if (swapchain_data) {
9708        if (swapchain_data->images.size() > 0) {
9709            for (auto swapchain_image : swapchain_data->images) {
9710                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
9711                if (image_sub != dev_data->imageSubresourceMap.end()) {
9712                    for (auto imgsubpair : image_sub->second) {
9713                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
9714                        if (image_item != dev_data->imageLayoutMap.end()) {
9715                            dev_data->imageLayoutMap.erase(image_item);
9716                        }
9717                    }
9718                    dev_data->imageSubresourceMap.erase(image_sub);
9719                }
                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
9721                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
9722                dev_data->imageMap.erase(swapchain_image);
9723            }
9724        }
9725        dev_data->device_extensions.swapchainMap.erase(swapchain);
9726    }
9727    lock.unlock();
9728    if (!skipCall)
9729        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
9730}
9731
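// On the first successful query, create IMAGE_NODE and layout-tracking entries for each swapchain image
// (swapchain images bypass vkCreateImage); on later queries, warn if the returned images no longer match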
9732VKAPI_ATTR VkResult VKAPI_CALL
9733GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
9734    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9735    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
9736
9737    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
9738        // This should never happen and is checked by param checker.
9739        if (!pCount)
9740            return result;
9741        std::lock_guard<std::mutex> lock(global_lock);
9742        const size_t count = *pCount;
9743        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
9744        if (swapchain_node && !swapchain_node->images.empty()) {
9745            // TODO : Not sure I like the memcmp here, but it works
9746            const bool mismatch = (swapchain_node->images.size() != count ||
9747                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
9748            if (mismatch) {
9749                // TODO: Verify against Valid Usage section of extension
9750                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9751                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching image data on a subsequent call.",
9754                        (uint64_t)(swapchain));
9755            }
9756        }
        // Guard against an unknown swapchain handle; the loop body dereferences swapchain_node
        for (uint32_t i = 0; swapchain_node && i < *pCount; ++i) {
9758            IMAGE_LAYOUT_NODE image_layout_node;
9759            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
9760            image_layout_node.format = swapchain_node->createInfo.imageFormat;
9761            // Add imageMap entries for each swapchain image
9762            VkImageCreateInfo image_ci = {};
9763            image_ci.mipLevels = 1;
9764            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
9765            image_ci.usage = swapchain_node->createInfo.imageUsage;
9766            image_ci.format = swapchain_node->createInfo.imageFormat;
9767            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
9768            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
9769            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
9770            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(&image_ci));
9771            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
9772            image_node->valid = false;
9773            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
9774            swapchain_node->images.push_back(pSwapchainImages[i]);
9775            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
9776            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
9777            dev_data->imageLayoutMap[subpair] = image_layout_node;
9778            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
9779        }
9780    }
9781    return result;
9782}
9783
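// Validate vkQueuePresentKHR: each wait semaphore must be signaled (and is then unsignaled), and every
// presented image must be bound to valid memory and in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR layout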
9784VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
9785    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9786    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9787    bool skip_call = false;
9788
9789    if (pPresentInfo) {
9790        std::lock_guard<std::mutex> lock(global_lock);
9791        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9792            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
9793            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9794                if (dev_data->semaphoreMap[semaphore].signaled) {
9795                    dev_data->semaphoreMap[semaphore].signaled = false;
9796                } else {
9797                    skip_call |=
9798                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9799                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9800                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
9801                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9802                }
9803            }
9804        }
        VkDeviceMemory mem = VK_NULL_HANDLE;
9806        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9807            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9808            if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
9809                VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
9810#if MTMERGESOURCE
9811                skip_call |=
9812                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
9813                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
9814#endif
9815                vector<VkImageLayout> layouts;
9816                if (FindLayouts(dev_data, image, layouts)) {
9817                    for (auto layout : layouts) {
9818                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
9819                            skip_call |=
9820                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
9821                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9822                                        "Images passed to present must be in layout "
9823                                        "PRESENT_SOURCE_KHR but is in %s",
9824                                        string_VkImageLayout(layout));
9825                        }
9826                    }
9827                }
9828            }
9829        }
9830    }
9831
9832    if (!skip_call)
9833        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
9834
9835    return result;
9836}
9837
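// Validate vkAcquireNextImageKHR: warn if the acquire semaphore is already signaled
// (a forward-progress hazard), mark it signaled for later waits, and remember which
// swapchain the fence (if any) will be signaled for.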
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    if (semaphore != VK_NULL_HANDLE &&
        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].signaled) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].signaled = true;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    lock.unlock();

    if (!skip_call) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }

    return result;
}

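// Create the callback down the chain first; on success, also register it with this
// layer's own report data so that messages generated here reach the new callback.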
VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

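// Layer property enumeration: this layer reports exactly one property block
// (global_layer) for both the instance- and device-level queries.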
VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

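// Device extension query: the layer advertises no device extensions of its own, so a
// query aimed at this layer returns an empty list; anything else is forwarded down the
// chain with a null layer name, so the ICD reports its own extensions.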
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

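// Name-to-function lookup helpers used by GetDeviceProcAddr/GetInstanceProcAddr below.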
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

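// Resolution order for device-level names: this layer's core intercepts first, then the
// WSI intercepts (handed out only if the device enabled VK_KHR_swapchain), and finally
// the next layer/ICD via the dispatch table.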
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

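// Instance-level resolution also covers device and WSI commands, since applications may
// legally query those through vkGetInstanceProcAddr; debug-report entry points are
// resolved next, and anything unknown is passed down the chain.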
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (proc)
        return proc;

    assert(instance);

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (proc)
        return proc;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}

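// The intercept tables below map Vulkan entry point names to this layer's
// implementations. Lookup is a linear scan, which is acceptable because applications
// resolve each entry point once at startup, not per call.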
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

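// Device-command intercept table. A hypothetical application-side flow (illustrative
// only; these calls are standard Vulkan, not defined in this file):
//
//     PFN_vkQueuePresentKHR pfnPresent = reinterpret_cast<PFN_vkQueuePresentKHR>(
//         vkGetDeviceProcAddr(device, "vkQueuePresentKHR"));
//     pfnPresent(queue, &presentInfo); // enters core_validation::QueuePresentKHR above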
static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

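// WSI intercepts are only handed out when the device actually enabled VK_KHR_swapchain;
// with a VK_NULL_HANDLE device (an instance-level query) the table is searched
// unconditionally.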
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };

    if (dev) {
        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0: just thin wrappers, since this library contains only a single layer

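// Under loader-layer interface v0, the loader discovers a layer's entry points through
// these undecorated exports; each one simply forwards into the core_validation namespace.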
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer command handles that case internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer command handles that case internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}
